path: root/arch/ppc64/kernel/ItLpQueue.c
/*
 * ItLpQueue.c
 * Copyright (C) 2001 Mike Corrigan  IBM Corporation
 * 
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */

#include <linux/stddef.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <asm/system.h>
#include <asm/paca.h>
#include <asm/iSeries/ItLpQueue.h>
#include <asm/iSeries/HvLpEvent.h>
#include <asm/iSeries/HvCallEvent.h>
#include <asm/iSeries/LparData.h>

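/*
 * Atomically claim the queue: a lwarx/stwcx. test-and-set on
 * xInUseWord.  Returns 1 if the word was clear and is now set (we
 * own the queue), or 0 if the queue was already in use, i.e. we
 * have recursed.  The trailing eieio keeps later accesses to the
 * queue from being reordered ahead of the acquisition.
 */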
static __inline__ int set_inUse( struct ItLpQueue * lpQueue )
{
	int t;
	u32 * inUseP = &(lpQueue->xInUseWord);

	__asm__ __volatile__("\n\
1:	lwarx	%0,0,%2		\n\
	cmpwi	0,%0,0		\n\
	li	%0,0		\n\
	bne-	2f		\n\
	addi	%0,%0,1		\n\
	stwcx.	%0,0,%2		\n\
	bne-	1b		\n\
2:	eieio"
	: "=&r" (t), "=m" (lpQueue->xInUseWord)
	: "r" (inUseP), "m" (lpQueue->xInUseWord)
	: "cc");

	return t;
}

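/*
 * Release the queue.  A plain store suffices; the caller issues an
 * mb() first so all event processing is visible before the queue is
 * marked free again.
 */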
static __inline__ void clear_inUse( struct ItLpQueue * lpQueue )
{
	lpQueue->xInUseWord = 0;
}

/* Array of LpEvent handler functions */
extern LpEventHandler lpEventHandler[HvLpEvent_Type_NumTypes];

/* Nonzero while ItLpQueue_process is running; used to catch recursion */
unsigned long ItLpQueueInProcess = 0;

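/*
 * Return the event at the current queue position if its valid bit
 * is set, advancing the current-event pointer (wrapping back to the
 * start of the event stack if necessary); otherwise return NULL.
 */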
struct HvLpEvent * ItLpQueue_getNextLpEvent( struct ItLpQueue * lpQueue )
{
	struct HvLpEvent * nextLpEvent = 
		(struct HvLpEvent *)lpQueue->xSlicCurEventPtr;
	if ( nextLpEvent->xFlags.xValid ) {
		/* rmb() needed only for weakly consistent machines (regatta) */
		rmb();
		/* Set pointer to next potential event */
		lpQueue->xSlicCurEventPtr += ((nextLpEvent->xSizeMinus1 +
				      LpEventAlign ) /
				      LpEventAlign ) *
				      LpEventAlign;
		/* Wrap to beginning if no room at end */
		if (lpQueue->xSlicCurEventPtr > lpQueue->xSlicLastValidEventPtr)
			lpQueue->xSlicCurEventPtr = lpQueue->xSlicEventStackPtr;
	}
	else 
		nextLpEvent = NULL;

	return nextLpEvent;
}

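/*
 * An interrupt is pending if the event at the current position is
 * valid or the hypervisor has flagged an overflow.
 */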
int ItLpQueue_isLpIntPending( struct ItLpQueue * lpQueue )
{
	int retval = 0;
	struct HvLpEvent * nextLpEvent;
	if ( lpQueue ) {
		nextLpEvent = (struct HvLpEvent *)lpQueue->xSlicCurEventPtr;
		retval = nextLpEvent->xFlags.xValid | lpQueue->xPlicOverflowIntPending;
	}
	return retval;
}

void ItLpQueue_clearValid( struct HvLpEvent * event )
{
	/* Clear the valid bit of the event.
	 * Also clear bits within this event that might
	 * look like valid bits (on 64-byte boundaries).
	 */
	unsigned extra = (( event->xSizeMinus1 + LpEventAlign ) /
						 LpEventAlign ) - 1;
	switch ( extra ) {
	  case 3:
	   ((struct HvLpEvent*)((char*)event+3*LpEventAlign))->xFlags.xValid=0;
	   /* fall through */
	  case 2:
	   ((struct HvLpEvent*)((char*)event+2*LpEventAlign))->xFlags.xValid=0;
	   /* fall through */
	  case 1:
	   ((struct HvLpEvent*)((char*)event+1*LpEventAlign))->xFlags.xValid=0;
	   /* fall through */
	  case 0:
	   ;
	}
	mb();
	event->xFlags.xValid = 0;
}

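/*
 * Drain the event queue: dispatch each valid event to its registered
 * handler, pulling in overflow events from the Hypervisor when the
 * queue runs dry, and return the number of events processed.  The
 * xInUseWord test-and-set guards against recursive entry.
 */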
unsigned ItLpQueue_process( struct ItLpQueue * lpQueue, struct pt_regs *regs )
{
	unsigned numIntsProcessed = 0;
	struct HvLpEvent * nextLpEvent;

	/* If we have recursed, just return */
	if ( !set_inUse( lpQueue ) )
		return 0;
	
	if (ItLpQueueInProcess == 0)
		ItLpQueueInProcess = 1;
	else
		BUG();

	for (;;) {
		nextLpEvent = ItLpQueue_getNextLpEvent( lpQueue );
		if ( nextLpEvent ) {
			/* Count events to return to caller
			 * and count processed events in lpQueue
 			 */
			++numIntsProcessed;
			lpQueue->xLpIntCount++;		
			/* Call the appropriate handler, passing a
			 * pointer to the LpEvent.  The handler must
			 * copy the LpEvent if it needs it in a bottom
			 * half (perhaps for an ACK).
			 *
			 * Handlers are responsible for ACK processing.
			 *
			 * The Hypervisor guarantees that LpEvents will
			 * only be delivered with types that we have
			 * registered for, so the type checks below are
			 * purely defensive.
			 */
			if ( nextLpEvent->xType < HvLpEvent_Type_NumTypes )
				lpQueue->xLpIntCountByType[nextLpEvent->xType]++;
			if ( nextLpEvent->xType < HvLpEvent_Type_NumTypes &&
			     lpEventHandler[nextLpEvent->xType] ) 
				lpEventHandler[nextLpEvent->xType](nextLpEvent, regs);
			else
				printk(KERN_INFO "Unexpected Lp Event type=%d\n", nextLpEvent->xType );
			
			ItLpQueue_clearValid( nextLpEvent );
		} else if ( lpQueue->xPlicOverflowIntPending )
			/*
			 * No more valid events.  If overflow events are
			 * pending, process them.
			 */
			HvCallEvent_getOverflowLpEvents( lpQueue->xIndex);
		else
			break;
	}

	ItLpQueueInProcess = 0;
	mb();
	clear_inUse( lpQueue );

	get_paca()->lpevent_count += numIntsProcessed;

	return numIntsProcessed;
}