path: root/drivers/rtc/rtc-dev.c
blob: 8a11de9552cd06427e222f8eaba23409f3b49476 (plain)
/*
 * RTC subsystem, dev interface
 *
 * Copyright (C) 2005 Tower Technologies
 * Author: Alessandro Zummo <a.zummo@towertech.it>
 *
 * based on arch/arm/common/rtctime.c
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
*/
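
/*
 * Typical userspace usage of this interface (an illustrative sketch, not
 * part of the driver): a process opens the rtcN character device and
 * drives it with the ioctls handled below, for example to read the
 * current hardware time (with the usual <fcntl.h>, <sys/ioctl.h> and
 * <linux/rtc.h> includes):
 *
 *	int fd = open("/dev/rtc0", O_RDONLY);
 *	struct rtc_time tm;
 *
 *	ioctl(fd, RTC_RD_TIME, &tm);
 *
 * Only one process may hold the device open at a time; see the
 * RTC_DEV_BUSY handling in rtc_dev_open() below.
 */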

#include <linux/module.h>
#include <linux/rtc.h>
#include "rtc-core.h"

static dev_t rtc_devt;

#define RTC_DEV_MAX 16 /* 16 RTCs should be enough for everyone... */

static int rtc_dev_open(struct inode *inode, struct file *file)
{
	int err;
	struct rtc_device *rtc = container_of(inode->i_cdev,
					struct rtc_device, char_dev);
	const struct rtc_class_ops *ops = rtc->ops;

	if (test_and_set_bit_lock(RTC_DEV_BUSY, &rtc->flags))
		return -EBUSY;

	file->private_data = rtc;

	err = ops->open ? ops->open(rtc->dev.parent) : 0;
	if (err == 0) {
		spin_lock_irq(&rtc->irq_lock);
		rtc->irq_data = 0;
		spin_unlock_irq(&rtc->irq_lock);

		return 0;
	}

	/* something has gone wrong */
	clear_bit_unlock(RTC_DEV_BUSY, &rtc->flags);
	return err;
}

#ifdef CONFIG_RTC_INTF_DEV_UIE_EMUL
/*
 * Poll the RTC seconds field for a change as often as possible; once the
 * first change has been seen, use a timer to skip most of each second and
 * only poll briefly around the expected update, reducing the polling load.
 */
static void rtc_uie_task(struct work_struct *work)
{
	struct rtc_device *rtc =
		container_of(work, struct rtc_device, uie_task);
	struct rtc_time tm;
	int num = 0;
	int err;

	err = rtc_read_time(rtc, &tm);

	spin_lock_irq(&rtc->irq_lock);
	if (rtc->stop_uie_polling || err) {
		rtc->uie_task_active = 0;
	} else if (rtc->oldsecs != tm.tm_sec) {
		num = (tm.tm_sec + 60 - rtc->oldsecs) % 60;
		rtc->oldsecs = tm.tm_sec;
		rtc->uie_timer.expires = jiffies + HZ - (HZ/10);
		rtc->uie_timer_active = 1;
		rtc->uie_task_active = 0;
		add_timer(&rtc->uie_timer);
	} else if (schedule_work(&rtc->uie_task) == 0) {
		rtc->uie_task_active = 0;
	}
	spin_unlock_irq(&rtc->irq_lock);
	if (num)
		rtc_update_irq(rtc, num, RTC_UF | RTC_IRQF);
}
static void rtc_uie_timer(unsigned long data)
{
	struct rtc_device *rtc = (struct rtc_device *)data;
	unsigned long flags;

	spin_lock_irqsave(&rtc->irq_lock, flags);
	rtc->uie_timer_active = 0;
	rtc->uie_task_active = 1;
	if ((schedule_work(&rtc->uie_task) == 0))
		rtc->uie_task_active = 0;
	spin_unlock_irqrestore(&rtc->irq_lock, flags);
}

static int clear_uie(struct rtc_device *rtc)
{
	spin_lock_irq(&rtc->irq_lock);
	if (rtc->uie_irq_active) {
		rtc->stop_uie_polling = 1;
		if (rtc->uie_timer_active) {
			spin_unlock_irq(&rtc->irq_lock);
			del_timer_sync(&rtc->uie_timer);
			spin_lock_irq(&rtc->irq_lock);
			rtc->uie_timer_active = 0;
		}
		if (rtc->uie_task_active) {
			spin_unlock_irq(&rtc->irq_lock);
			flush_scheduled_work();
			spin_lock_irq(&rtc->irq_lock);
		}
		rtc->uie_irq_active = 0;
	}
	spin_unlock_irq(&rtc->irq_lock);
	return 0;
}

static int set_uie(struct rtc_device *rtc)
{
	struct rtc_time tm;
	int err;

	err = rtc_read_time(rtc, &tm);
	if (err)
		return err;
	spin_lock_irq(&rtc->irq_lock);
	if (!rtc->uie_irq_active) {
		rtc->uie_irq_active = 1;
		rtc->stop_uie_polling = 0;
		rtc->oldsecs = tm.tm_sec;
		rtc->uie_task_active = 1;
		if (schedule_work(&rtc->uie_task) == 0)
			rtc->uie_task_active = 0;
	}
	rtc->irq_data = 0;
	spin_unlock_irq(&rtc->irq_lock);
	return 0;
}

int rtc_dev_update_irq_enable_emul(struct rtc_device *rtc, unsigned int enabled)
{
	if (enabled)
		return set_uie(rtc);
	else
		return clear_uie(rtc);
}
EXPORT_SYMBOL(rtc_dev_update_irq_enable_emul);

#endif /* CONFIG_RTC_INTF_DEV_UIE_EMUL */
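
/*
 * How userspace consumes the read() interface below (an illustrative
 * sketch, not part of this file): after enabling interrupts, e.g. with
 * ioctl(fd, RTC_UIE_ON, 0), a blocking read returns an unsigned long
 * whose low byte carries the event flags (RTC_UF, RTC_PF or RTC_AF,
 * ored with RTC_IRQF) and whose remaining bytes carry the number of
 * events since the last read:
 *
 *	unsigned long data;
 *
 *	read(fd, &data, sizeof(data));
 */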

static ssize_t
rtc_dev_read(struct file *file, char __user *buf, size_t count, loff_t *ppos)
{
	struct rtc_device *rtc = file->private_data;

	DECLARE_WAITQUEUE(wait, current);
	unsigned long data;
	ssize_t ret;

	if (count != sizeof(unsigned int) && count < sizeof(unsigned long))
		return -EINVAL;

	add_wait_queue(&rtc->irq_queue, &wait);
	do {
		__set_current_state(TASK_INTERRUPTIBLE);

		spin_lock_irq(&rtc->irq_lock);
		data = rtc->irq_data;
		rtc->irq_data = 0;
		spin_unlock_irq(&rtc->irq_lock);

		if (data != 0) {
			ret = 0;
			break;
		}
		if (file->f_flags & O_NONBLOCK) {
			ret = -EAGAIN;
			break;
		}
		if (signal_pending(current)) {
			ret = -ERESTARTSYS;
			break;
		}
		schedule();
	} while (1);
	set_current_state(TASK_RUNNING);
	remove_wait_queue(&rtc->irq_queue, &wait);

	if (ret == 0) {
		/* Check for any data updates */
		if (rtc->ops->read_callback)
			data = rtc->ops->read_callback(rtc->dev.parent,
						       data);

		if (sizeof(int) != sizeof(long) &&
		    count == sizeof(unsigned int))
			ret = put_user(data, (unsigned int __user *)buf) ?:
				sizeof(unsigned int);
		else
			ret = put_user(data, (unsigned long __user *)buf) ?:
				sizeof(unsigned long);
	}
	return ret;
}

static unsigned int rtc_dev_poll(struct file *file, poll_table *wait)
{
	struct rtc_device *rtc = file->private_data;
	unsigned long data;

	poll_wait(file, &rtc->irq_queue, wait);

	data = rtc->irq_data;

	return (data != 0) ? (POLLIN | POLLRDNORM) : 0;
}

static long rtc_dev_ioctl(struct file *file,
		unsigned int cmd, unsigned long arg)
{
	int err = 0;
	struct rtc_device *rtc = file->private_data;
	const struct rtc_class_ops *ops = rtc->ops;
	struct rtc_time tm;
	struct rtc_wkalrm alarm;
	void __user *uarg = (void __user *) arg;

	err = mutex_lock_interruptible(&rtc->ops_lock);
	if (err)
		return err;

	/* check that the calling task has appropriate permissions
	 * for certain ioctls. doing this check here is useful
	 * to avoid duplicate code in each driver.
	 */
	switch (cmd) {
	case RTC_EPOCH_SET:
	case RTC_SET_TIME:
		if (!capable(CAP_SYS_TIME))
			err = -EACCES;
		break;

	case RTC_IRQP_SET:
		if (arg > rtc->max_user_freq && !capable(CAP_SYS_RESOURCE))
			err = -EACCES;
		break;

	case RTC_PIE_ON:
		if (rtc->irq_freq > rtc->max_user_freq &&
				!capable(CAP_SYS_RESOURCE))
			err = -EACCES;
		break;
	}

	if (err)
		goto done;

	/* try the driver's ioctl interface */
	if (ops->ioctl) {
		err = ops->ioctl(rtc->dev.parent, cmd, arg);
		if (err != -ENOIOCTLCMD) {
			mutex_unlock(&rtc->ops_lock);
			return err;
		}
	}

	/* if the driver does not provide the ioctl interface
	 * or if that particular ioctl was not implemented
	 * (-ENOIOCTLCMD), we will try to emulate here.
	 *
	 * Drivers *SHOULD NOT* provide ioctl implementations
	 * for these requests.  Instead, provide methods to
	 * support the following code, so that the RTC's main
	 * features are accessible without using ioctls.
	 *
	 * RTC and alarm times will be in UTC, by preference,
	 * but dual-booting with MS-Windows implies RTCs must
	 * use the local wall clock time.
	 */
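
	/* For reference, a driver normally satisfies the emulation below by
	 * filling in struct rtc_class_ops rather than handling these ioctls
	 * itself.  An illustrative sketch (the foo_* callback names are
	 * hypothetical):
	 *
	 *	static const struct rtc_class_ops foo_rtc_ops = {
	 *		.read_time	= foo_read_time,
	 *		.set_time	= foo_set_time,
	 *		.read_alarm	= foo_read_alarm,
	 *		.set_alarm	= foo_set_alarm,
	 *	};
	 */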

	switch (cmd) {
	case RTC_ALM_READ:
		mutex_unlock(&rtc->ops_lock);

		err = rtc_read_alarm(rtc, &alarm);
		if (err < 0)
			return err;

		if (copy_to_user(uarg, &alarm.time, sizeof(tm)))
			err = -EFAULT;
		return err;

	case RTC_ALM_SET:
		mutex_unlock(&rtc->ops_lock);

		if (copy_from_user(&alarm.time, uarg, sizeof(tm)))
			return -EFAULT;

		alarm.enabled = 0;
		alarm.pending = 0;
		alarm.time.tm_wday = -1;
		alarm.time.tm_yday = -1;
		alarm.time.tm_isdst = -1;

		/* RTC_ALM_SET alarms may be up to 24 hours in the future.
		 * Rather than expecting every RTC to implement "don't care"
		 * for day/month/year fields, just force the alarm to have
		 * the right values for those fields.
		 *
		 * RTC_WKALM_SET should be used instead.  Not only does it
		 * eliminate the need for a separate RTC_AIE_ON call, it
		 * doesn't have the "alarm 23:59:59 in the future" race.
		 *
		 * NOTE:  some legacy code may have used invalid fields as
		 * wildcards, exposing hardware "periodic alarm" capabilities.
		 * Not supported here.
		 */
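		/* The preferred userspace call therefore looks roughly like
		 * this (illustrative sketch; alm.time must hold the complete
		 * wall-clock date and time):
		 *
		 *	struct rtc_wkalrm alm = { .enabled = 1 };
		 *
		 *	ioctl(fd, RTC_WKALM_SET, &alm);
		 */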
		{
			unsigned long now, then;

			err = rtc_read_time(rtc, &tm);
			if (err < 0)
				return err;
			rtc_tm_to_time(&tm, &now);

			alarm.time.tm_mday = tm.tm_mday;
			alarm.time.tm_mon = tm.tm_mon;
			alarm.time.tm_year = tm.tm_year;
			err  = rtc_valid_tm(&alarm.time);
			if (err < 0)
				return err;
			rtc_tm_to_time(&alarm.time, &then);

			/* alarm may need to wrap into tomorrow */
			if (then < now) {
				rtc_time_to_tm(now + 24 * 60 * 60, &tm);
				alarm.time.tm_mday = tm.tm_mday;
				alarm.time.tm_mon = tm.tm_mon;
				alarm.time.tm_year = tm.tm_year;
			}
		}

		return rtc_set_alarm(rtc, &alarm);

	case RTC_RD_TIME:
		mutex_unlock(&rtc->ops_lock);

		err = rtc_read_time(rtc, &tm);
		if (err < 0)
			return err;

		if (copy_to_user(uarg, &tm, sizeof(tm)))
			err = -EFAULT;
		return err;

	case RTC_SET_TIME:
		mutex_unlock(&rtc->ops_lock);

		if (copy_from_user(&tm, uarg, sizeof(tm)))
			return -EFAULT;

		return rtc_set_time(rtc, &tm);

	case RTC_PIE_ON:
		err = rtc_irq_set_state(rtc, NULL, 1);
		break;

	case RTC_PIE_OFF:
		err = rtc_irq_set_state(rtc, NULL, 0);
		break;

	case RTC_AIE_ON:
		mutex_unlock(&rtc->ops_lock);
		return rtc_alarm_irq_enable(rtc, 1);

	case RTC_AIE_OFF:
		mutex_unlock(&rtc->ops_lock);
		return rtc_alarm_irq_enable(rtc, 0);

	case RTC_UIE_ON:
		mutex_unlock(&rtc->ops_lock);
		return rtc_update_irq_enable(rtc, 1);

	case RTC_UIE_OFF:
		mutex_unlock(&rtc->ops_lock);
		return rtc_update_irq_enable(rtc, 0);

	case RTC_IRQP_SET:
		err = rtc_irq_set_freq(rtc, NULL, arg);
		break;

	case RTC_IRQP_READ:
		err = put_user(rtc->irq_freq, (unsigned long __user *)uarg);
		break;

#if 0
	case RTC_EPOCH_SET:
#ifndef rtc_epoch
		/*
		 * There were no RTC clocks before 1900.
		 */
		if (arg < 1900) {
			err = -EINVAL;
			break;
		}
		rtc_epoch = arg;
		err = 0;
#endif
		break;

	case RTC_EPOCH_READ:
		err = put_user(rtc_epoch, (unsigned long __user *)uarg);
		break;
#endif
	case RTC_WKALM_SET:
		mutex_unlock(&rtc->ops_lock);
		if (copy_from_user(&alarm, uarg, sizeof(alarm)))
			return -EFAULT;

		return rtc_set_alarm(rtc, &alarm);

	case RTC_WKALM_RD:
		mutex_unlock(&rtc->ops_lock);
		err = rtc_read_alarm(rtc, &alarm);
		if (err < 0)
			return err;

		if (copy_to_user(uarg, &alarm, sizeof(alarm)))
			err = -EFAULT;
		return err;

	default:
		err = -ENOTTY;
		break;
	}

done:
	mutex_unlock(&rtc->ops_lock);
	return err;
}

static int rtc_dev_fasync(int fd, struct file *file, int on)
{
	struct rtc_device *rtc = file->private_data;
	return fasync_helper(fd, file, on, &rtc->async_queue);
}

static int rtc_dev_release(struct inode *inode, struct file *file)
{
	struct rtc_device *rtc = file->private_data;

	/* We shut down the repeating IRQs that userspace enabled,
	 * since nothing is listening to them.
	 *  - Update (UIE) ... currently only managed through ioctls
	 *  - Periodic (PIE) ... also used through rtc_*() interface calls
	 *
	 * Leave the alarm alone; it may be set to trigger a system wakeup
	 * later, or be used by kernel code, and is a one-shot event anyway.
	 */

	/* Keep ioctl until all drivers are converted */
	rtc_dev_ioctl(file, RTC_UIE_OFF, 0);
	rtc_update_irq_enable(rtc, 0);
	rtc_irq_set_state(rtc, NULL, 0);

	if (rtc->ops->release)
		rtc->ops->release(rtc->dev.parent);

	clear_bit_unlock(RTC_DEV_BUSY, &rtc->flags);
	return 0;
}

static const struct file_operations rtc_dev_fops = {
	.owner		= THIS_MODULE,
	.llseek		= no_llseek,
	.read		= rtc_dev_read,
	.poll		= rtc_dev_poll,
	.unlocked_ioctl	= rtc_dev_ioctl,
	.open		= rtc_dev_open,
	.release	= rtc_dev_release,
	.fasync		= rtc_dev_fasync,
};

/* insertion/removal hooks */
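/*
 * rtc_dev_prepare() is meant to run before the rtc device is registered
 * (it picks the dev_t from rtc->id and initializes the cdev),
 * rtc_dev_add_device() right after registration, and rtc_dev_del_device()
 * on removal; the callers live in the RTC class core, outside this file.
 */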

void rtc_dev_prepare(struct rtc_device *rtc)
{
	if (!rtc_devt)
		return;

	if (rtc->id >= RTC_DEV_MAX) {
		pr_debug("%s: too many RTC devices\n", rtc->name);
		return;
	}

	rtc->dev.devt = MKDEV(MAJOR(rtc_devt), rtc->id);

#ifdef CONFIG_RTC_INTF_DEV_UIE_EMUL
	INIT_WORK(&rtc->uie_task, rtc_uie_task);
	setup_timer(&rtc->uie_timer, rtc_uie_timer, (unsigned long)rtc);
#endif

	cdev_init(&rtc->char_dev, &rtc_dev_fops);
	rtc->char_dev.owner = rtc->owner;
}

void rtc_dev_add_device(struct rtc_device *rtc)
{
	if (cdev_add(&rtc->char_dev, rtc->dev.devt, 1))
		printk(KERN_WARNING "%s: failed to add char device %d:%d\n",
			rtc->name, MAJOR(rtc_devt), rtc->id);
	else
		pr_debug("%s: dev (%d:%d)\n", rtc->name,
			MAJOR(rtc_devt), rtc->id);
}

void rtc_dev_del_device(struct rtc_device *rtc)
{
	if (rtc->dev.devt)
		cdev_del(&rtc->char_dev);
}

void __init rtc_dev_init(void)
{
	int err;

	err = alloc_chrdev_region(&rtc_devt, 0, RTC_DEV_MAX, "rtc");
	if (err < 0)
		printk(KERN_ERR "%s: failed to allocate char dev region\n",
			__FILE__);
}

void __exit rtc_dev_exit(void)
{
	if (rtc_devt)
		unregister_chrdev_region(rtc_devt, RTC_DEV_MAX);
}