#ifndef __irq_h
#define __irq_h

/*
 * Please do not include this file in generic code. There is currently
 * no requirement for any architecture to implement anything held
 * within this file.
 *
 * Thanks. --rmk
 */

#include <linux/smp.h>

#if !defined(CONFIG_S390)

#include <linux/linkage.h>
#include <linux/cache.h>
#include <linux/spinlock.h>
#include <linux/cpumask.h>
#include <linux/irqreturn.h>

#include <asm/irq.h>
#include <asm/ptrace.h>

/*
 * IRQ line status.
 */
#define IRQ_INPROGRESS	1	/* IRQ handler active - do not enter! */
#define IRQ_DISABLED	2	/* IRQ disabled - do not enter! */
#define IRQ_PENDING	4	/* IRQ pending - replay on enable */
#define IRQ_REPLAY	8	/* IRQ has been replayed but not acked yet */
#define IRQ_AUTODETECT	16	/* IRQ is being autodetected */
#define IRQ_WAITING	32	/* IRQ not yet seen - for autodetection */
#define IRQ_LEVEL	64	/* IRQ level triggered */
#define IRQ_MASKED	128	/* IRQ masked - shouldn't be seen again */
#if defined(ARCH_HAS_IRQ_PER_CPU)
# define IRQ_PER_CPU	256	/* IRQ is per CPU */
# define CHECK_IRQ_PER_CPU(var) ((var) & IRQ_PER_CPU)
#else
# define CHECK_IRQ_PER_CPU(var) 0
#endif

/*
 * Interrupt controller descriptor. This is all we need
 * to describe the low-level hardware.
 */
struct hw_interrupt_type {
	const char * typename;
	unsigned int (*startup)(unsigned int irq);
	void (*shutdown)(unsigned int irq);
	void (*enable)(unsigned int irq);
	void (*disable)(unsigned int irq);
	void (*ack)(unsigned int irq);
	void (*end)(unsigned int irq);
	void (*set_affinity)(unsigned int irq, cpumask_t dest);
	/* Currently used only by UML, might disappear one day. */
#ifdef CONFIG_IRQ_RELEASE_METHOD
	void (*release)(unsigned int irq, void *dev_id);
#endif
};

typedef struct hw_interrupt_type hw_irq_controller;

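/*
 * Illustrative sketch only (kept out of the build with #if 0): a minimal
 * hw_irq_controller for a hypothetical level-triggered controller.  The
 * example_pic_* names are invented for demonstration; a real driver would
 * program its own hardware registers and would typically also implement
 * the startup/shutdown hooks.
 */
#if 0
static void example_pic_mask(unsigned int irq)
{
	/* set the (hypothetical) mask bit for this line */
}

static void example_pic_unmask(unsigned int irq)
{
	/* clear the (hypothetical) mask bit so the line can fire again */
}

static void example_pic_ack(unsigned int irq)
{
	/* acknowledge the interrupt at the controller */
}

static void example_pic_end(unsigned int irq)
{
	/* re-enable the line once the handlers have run */
}

static struct hw_interrupt_type example_pic_type = {
	.typename	= "EXAMPLE-PIC",
	.enable		= example_pic_unmask,
	.disable	= example_pic_mask,
	.ack		= example_pic_ack,
	.end		= example_pic_end,
};
#endif
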
/*
 * This is the "IRQ descriptor", which contains various information
 * about the irq, including what kind of hardware handling it has,
 * whether it is disabled etc etc.
 *
 * Pad this out to 32 bytes for cache and indexing reasons.
 */
typedef struct irq_desc {
	hw_irq_controller *chip;
	void *chip_data;
	struct irqaction *action;	/* IRQ action list */
	unsigned int status;		/* IRQ status */
	unsigned int depth;		/* nested irq disables */
	unsigned int irq_count;		/* For detecting broken interrupts */
	unsigned int irqs_unhandled;
	spinlock_t lock;
#if defined (CONFIG_GENERIC_PENDING_IRQ) || defined (CONFIG_IRQBALANCE)
	unsigned int move_irq;		/* Flag: need to re-target intr dest */
#endif
} ____cacheline_aligned irq_desc_t;

extern irq_desc_t irq_desc[NR_IRQS];

/* Return a pointer to the irq descriptor for IRQ. */
static inline irq_desc_t *
irq_descp (int irq)
{
	return irq_desc + irq;
}

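/*
 * Illustrative sketch only (kept out of the build with #if 0): how the
 * descriptor array, irq_descp() and the IRQ_* status bits above fit
 * together.  example_dump_irq() is an invented name; real callers must,
 * as shown, hold desc->lock before looking at the fields.
 */
#if 0
static void example_dump_irq(unsigned int irq)
{
	irq_desc_t *desc = irq_descp(irq);
	unsigned long flags;

	spin_lock_irqsave(&desc->lock, flags);
	printk(KERN_DEBUG "irq %u:%s%s%s depth=%u count=%u\n", irq,
	       (desc->status & IRQ_DISABLED) ? " disabled" : "",
	       (desc->status & IRQ_INPROGRESS) ? " in-progress" : "",
	       CHECK_IRQ_PER_CPU(desc->status) ? " per-cpu" : "",
	       desc->depth, desc->irq_count);
	spin_unlock_irqrestore(&desc->lock, flags);
}
#endif
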
#include <asm/hw_irq.h> /* the arch dependent stuff */

extern int setup_irq(unsigned int irq, struct irqaction * new);

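/*
 * Illustrative sketch only (kept out of the build with #if 0): wiring a
 * statically allocated struct irqaction (declared in <linux/interrupt.h>)
 * to a line with setup_irq(), as board code of this era typically did for
 * timer interrupts.  EXAMPLE_TIMER_IRQ and the example_timer_* names are
 * invented for demonstration.
 */
#if 0
static irqreturn_t example_timer_interrupt(int irq, void *dev_id,
					    struct pt_regs *regs)
{
	/* ... acknowledge the device and do the periodic work ... */
	return IRQ_HANDLED;
}

static struct irqaction example_timer_irqaction = {
	.handler	= example_timer_interrupt,
	.flags		= SA_INTERRUPT,
	.name		= "example-timer",
};

static void __init example_time_init(void)
{
	setup_irq(EXAMPLE_TIMER_IRQ, &example_timer_irqaction);
}
#endif
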
#ifdef CONFIG_GENERIC_HARDIRQS
extern cpumask_t irq_affinity[NR_IRQS];

#ifdef CONFIG_SMP
static inline void set_native_irq_info(int irq, cpumask_t mask)
{
	irq_affinity[irq] = mask;
}
#else
static inline void set_native_irq_info(int irq, cpumask_t mask)
{
}
#endif

#ifdef CONFIG_SMP

#if defined (CONFIG_GENERIC_PENDING_IRQ) || defined (CONFIG_IRQBALANCE)
extern cpumask_t pending_irq_cpumask[NR_IRQS];

void set_pending_irq(unsigned int irq, cpumask_t mask);
void move_native_irq(int irq);

#ifdef CONFIG_PCI_MSI
/*
 * Wonder why these are dummies?
 * E.g. set_ioapic_affinity_vector() calls the set_ioapic_affinity_irq()
 * counterpart after translating the vector to irq info. We need to perform
 * this operation on the real irq, when we don't use vectors, i.e. when
 * pci_use_vector() is false.
 */
static inline void move_irq(int irq)
{
}

static inline void set_irq_info(int irq, cpumask_t mask)
{
}

#else // CONFIG_PCI_MSI

static inline void move_irq(int irq)
{
	move_native_irq(irq);
}

static inline void set_irq_info(int irq, cpumask_t mask)
{
	set_native_irq_info(irq, mask);
}

#endif // CONFIG_PCI_MSI

#else // CONFIG_GENERIC_PENDING_IRQ || CONFIG_IRQBALANCE

#define move_irq(x)
#define move_native_irq(x)
#define set_pending_irq(x,y)
static inline void set_irq_info(int irq, cpumask_t mask)
{
	set_native_irq_info(irq, mask);
}

#endif // CONFIG_GENERIC_PENDING_IRQ

#else // CONFIG_SMP

#define move_irq(x)
#define move_native_irq(x)

#endif // CONFIG_SMP

#ifdef CONFIG_IRQBALANCE
extern void set_balance_irq_affinity(unsigned int irq, cpumask_t mask);
#else
static inline void set_balance_irq_affinity(unsigned int irq, cpumask_t mask)
{
}
#endif

extern int no_irq_affinity;
extern int noirqdebug_setup(char *str);

extern fastcall irqreturn_t handle_IRQ_event(unsigned int irq, struct pt_regs *regs,
					     struct irqaction *action);
extern fastcall unsigned int __do_IRQ(unsigned int irq, struct pt_regs *regs);
extern void note_interrupt(unsigned int irq, irq_desc_t *desc,
			   int action_ret, struct pt_regs *regs);
extern int can_request_irq(unsigned int irq, unsigned long irqflags);

extern void init_irq_proc(void);

#ifdef CONFIG_AUTO_IRQ_AFFINITY
extern int select_smp_affinity(unsigned int irq);
#else
static inline int
select_smp_affinity(unsigned int irq)
{
	return 1;
}
#endif

#endif /* CONFIG_GENERIC_HARDIRQS */

extern hw_irq_controller no_irq_type;	/* needed in every arch ? */

#endif /* !CONFIG_S390 */

#endif /* __irq_h */