@ linux-patched/include/xenomai/linux/stdarg.h:1 @
--- linux/include/xenomai/version.h	1970-01-01 01:00:00.000000000 +0100
+#include <stdarg.h>
+++ linux-patched/include/xenomai/version.h	2022-03-21 12:58:32.309860487 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/include/xenomai/pipeline/sched.h	1970-01-01 01:00:00.000000000 +0100
+/*
+ * Copyright (C) 2001-2013 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA.
+ */
+#ifndef _XENOMAI_VERSION_H
+#define _XENOMAI_VERSION_H
+
+#ifndef __KERNEL__
+#include <xeno_config.h>
+#include <boilerplate/compiler.h>
+#endif
+
+#define XENO_VERSION(maj, min, rev)  (((maj)<<16)|((min)<<8)|(rev))
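+/*
+ * For instance (illustrative values): XENO_VERSION(3, 2, 1) encodes
+ * as 0x030201, so version codes can be compared numerically.
+ */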
+
+#define XENO_VERSION_CODE	XENO_VERSION(CONFIG_XENO_VERSION_MAJOR,	\
+					     CONFIG_XENO_VERSION_MINOR,	\
+					     CONFIG_XENO_REVISION_LEVEL)
+
+#define XENO_VERSION_STRING	CONFIG_XENO_VERSION_STRING
+
+#endif /* _XENOMAI_VERSION_H */
+++ linux-patched/include/xenomai/pipeline/sched.h	2022-03-21 12:58:32.033863179 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/include/xenomai/pipeline/sirq.h	1970-01-01 01:00:00.000000000 +0100
+/*
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright (C) 2020 Philippe Gerum  <rpm@xenomai.org>
+ */
+
+#ifndef _COBALT_KERNEL_DOVETAIL_SCHED_H
+#define _COBALT_KERNEL_DOVETAIL_SCHED_H
+
+#include <cobalt/kernel/lock.h>
+
+struct xnthread;
+struct xnsched;
+struct task_struct;
+
+void pipeline_init_shadow_tcb(struct xnthread *thread);
+
+void pipeline_init_root_tcb(struct xnthread *thread);
+
+int ___xnsched_run(struct xnsched *sched);
+
+static inline int pipeline_schedule(struct xnsched *sched)
+{
+	return run_oob_call((int (*)(void *))___xnsched_run, sched);
+}
+
+static inline void pipeline_prep_switch_oob(struct xnthread *root)
+{
+	/* N/A */
+}
+
+bool pipeline_switch_to(struct xnthread *prev,
+			struct xnthread *next,
+			bool leaving_inband);
+
+int pipeline_leave_inband(void);
+
+int pipeline_leave_oob_prepare(void);
+
+static inline void pipeline_leave_oob_unlock(void)
+{
+	/*
+	 * We may not re-enable hard irqs due to the specifics of
+	 * stage escalation via run_oob_call(), to prevent breaking
+	 * the (virtual) interrupt state.
+	 */
+	xnlock_put(&nklock);
+}
+
+void pipeline_leave_oob_finish(void);
+
+static inline
+void pipeline_finalize_thread(struct xnthread *thread)
+{
+	/* N/A */
+}
+
+void pipeline_raise_mayday(struct task_struct *tsk);
+
+void pipeline_clear_mayday(void);
+
+#endif /* !_COBALT_KERNEL_DOVETAIL_SCHED_H */
+++ linux-patched/include/xenomai/pipeline/sirq.h	2022-03-21 12:58:32.026863247 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/include/xenomai/pipeline/wrappers.h	1970-01-01 01:00:00.000000000 +0100
+/*
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright (C) 2020 Philippe Gerum  <rpm@xenomai.org>
+ */
+
+#ifndef _COBALT_KERNEL_DOVETAIL_SIRQ_H
+#define _COBALT_KERNEL_DOVETAIL_SIRQ_H
+
+#include <linux/irq_pipeline.h>
+#include <cobalt/kernel/assert.h>
+
+/*
+ * Wrappers to create "synthetic IRQs" the Dovetail way. Those
+ * interrupt channels can only be triggered by software, in order to run
+ * a handler on the in-band execution stage.
+ */
+
+static inline
+int pipeline_create_inband_sirq(irqreturn_t (*handler)(int irq, void *dev_id))
+{
+	/*
+	 * Allocate an IRQ from the synthetic interrupt domain then
+	 * trap it to @handler, to be fired from the in-band stage.
+	 */
+	int sirq, ret;
+
+	sirq = irq_create_direct_mapping(synthetic_irq_domain);
+	if (sirq == 0)
+		return -EAGAIN;
+
+	ret = __request_percpu_irq(sirq,
+			handler,
+			IRQF_NO_THREAD,
+			"Inband sirq",
+			&cobalt_machine_cpudata);
+
+	if (ret) {
+		irq_dispose_mapping(sirq);
+		return ret;
+	}
+
+	return sirq;
+}
+
+static inline
+void pipeline_delete_inband_sirq(int sirq)
+{
+	/*
+	 * Free the synthetic IRQ then deallocate it to its
+	 * originating domain.
+	 */
+	free_percpu_irq(sirq,
+		&cobalt_machine_cpudata);
+
+	irq_dispose_mapping(sirq);
+}
+
+static inline void pipeline_post_sirq(int sirq)
+{
+	/* Trigger the synthetic IRQ */
+	irq_post_inband(sirq);
+}
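+
+/*
+ * Usage sketch (hypothetical handler name, error handling omitted):
+ * allocate the synthetic IRQ once, post it from out-of-band context
+ * whenever some work must run on the in-band stage, then dispose of
+ * it on cleanup.
+ *
+ *   static irqreturn_t relay_handler(int sirq, void *dev_id);
+ *
+ *   int sirq = pipeline_create_inband_sirq(relay_handler);
+ *   ...
+ *   pipeline_post_sirq(sirq);   relay_handler() then runs in-band
+ *   ...
+ *   pipeline_delete_inband_sirq(sirq);
+ */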
+
+#endif /* !_COBALT_KERNEL_DOVETAIL_SIRQ_H */
+++ linux-patched/include/xenomai/pipeline/wrappers.h	2022-03-21 12:58:32.019863315 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/include/xenomai/pipeline/kevents.h	1970-01-01 01:00:00.000000000 +0100
+/*
+ * SPDX-License-Identifier: GPL-2.0
+ */
+#ifndef _COBALT_KERNEL_DOVETAIL_WRAPPERS_H
+#define _COBALT_KERNEL_DOVETAIL_WRAPPERS_H
+
+/* No wrapper needed so far. */
+
+#endif /* !_COBALT_KERNEL_DOVETAIL_WRAPPERS_H */
+++ linux-patched/include/xenomai/pipeline/kevents.h	2022-03-21 12:58:32.011863393 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/include/xenomai/pipeline/vdso_fallback.h	1970-01-01 01:00:00.000000000 +0100
+/*
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright (C) 2020 Philippe Gerum  <rpm@xenomai.org>
+ */
+
+#ifndef _COBALT_KERNEL_DOVETAIL_KEVENTS_H
+#define _COBALT_KERNEL_DOVETAIL_KEVENTS_H
+
+#define KEVENT_PROPAGATE   0
+#define KEVENT_STOP        1
+
+struct cobalt_process;
+struct cobalt_thread;
+
+static inline
+int pipeline_attach_process(struct cobalt_process *process)
+{
+	return 0;
+}
+
+static inline
+void pipeline_detach_process(struct cobalt_process *process)
+{ }
+
+int pipeline_prepare_current(void);
+
+void pipeline_attach_current(struct xnthread *thread);
+
+int pipeline_trap_kevents(void);
+
+void pipeline_enable_kevents(void);
+
+void pipeline_cleanup_process(void);
+
+#endif /* !_COBALT_KERNEL_DOVETAIL_KEVENTS_H */
+++ linux-patched/include/xenomai/pipeline/vdso_fallback.h	2022-03-21 12:58:32.004863461 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/include/xenomai/pipeline/machine.h	1970-01-01 01:00:00.000000000 +0100
+/*
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright (C) 2020 Philippe Gerum  <rpm@xenomai.org>
+ * Copyright (c) Siemens AG, 2021
+ */
+
+#ifndef _COBALT_KERNEL_PIPELINE_VDSO_FALLBACK_H
+#define _COBALT_KERNEL_PIPELINE_VDSO_FALLBACK_H
+
+#include <cobalt/kernel/clock.h>
+#include <cobalt/kernel/timer.h>
+#include <xenomai/posix/clock.h>
+
+#define is_clock_gettime(__nr)		((__nr) == __NR_clock_gettime)
+
+#ifndef __NR_clock_gettime64
+#define is_clock_gettime64(__nr)	0
+#else
+#define is_clock_gettime64(__nr)	((__nr) == __NR_clock_gettime64)
+#endif
+
+static __always_inline bool
+pipeline_handle_vdso_fallback(int nr, struct pt_regs *regs)
+{
+	struct __kernel_old_timespec __user *u_old_ts;
+	struct __kernel_timespec uts, __user *u_uts;
+	struct __kernel_old_timespec old_ts;
+	struct timespec64 ts64;
+	int clock_id, ret = 0;
+	unsigned long args[6];
+
+	if (!is_clock_gettime(nr) && !is_clock_gettime64(nr))
+		return false;
+
+	/*
+	 * We need to fetch the args again because not all archs use the same
+	 * calling convention for Linux and Xenomai syscalls.
+	 */
+	syscall_get_arguments(current, regs, args);
+
+	clock_id = (int)args[0];
+	switch (clock_id) {
+	case CLOCK_MONOTONIC:
+		ns2ts(&ts64, xnclock_read_monotonic(&nkclock));
+		break;
+	case CLOCK_REALTIME:
+		ns2ts(&ts64, xnclock_read_realtime(&nkclock));
+		break;
+	default:
+		return false;
+	}
+
+	if (is_clock_gettime(nr)) {
+		old_ts.tv_sec = (__kernel_old_time_t)ts64.tv_sec;
+		old_ts.tv_nsec = ts64.tv_nsec;
+		u_old_ts = (struct __kernel_old_timespec __user *)args[1];
+		if (raw_copy_to_user(u_old_ts, &old_ts, sizeof(old_ts)))
+			ret = -EFAULT;
+	} else if (is_clock_gettime64(nr)) {
+		uts.tv_sec = ts64.tv_sec;
+		uts.tv_nsec = ts64.tv_nsec;
+		u_uts = (struct __kernel_timespec __user *)args[1];
+		if (raw_copy_to_user(u_uts, &uts, sizeof(uts)))
+			ret = -EFAULT;
+	}
+
+	__xn_status_return(regs, ret);
+
+	return true;
+}
+
+#endif /* !_COBALT_KERNEL_PIPELINE_VDSO_FALLBACK_H */
+++ linux-patched/include/xenomai/pipeline/machine.h	2022-03-21 12:58:31.997863530 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/include/xenomai/pipeline/irq.h	1970-01-01 01:00:00.000000000 +0100
+/*
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright (C) 2020 Philippe Gerum  <rpm@xenomai.org>
+ */
+
+#ifndef _COBALT_KERNEL_DOVETAIL_MACHINE_H
+#define _COBALT_KERNEL_DOVETAIL_MACHINE_H
+
+#include <linux/percpu.h>
+
+#ifdef CONFIG_FTRACE
+#define boot_lat_trace_notice "[LTRACE]"
+#else
+#define boot_lat_trace_notice ""
+#endif
+
+struct vm_area_struct;
+
+struct cobalt_machine {
+	const char *name;
+	int (*init)(void);
+	int (*late_init)(void);
+	void (*cleanup)(void);
+	void (*prefault)(struct vm_area_struct *vma);
+	const char *const *fault_labels;
+};
+
+extern struct cobalt_machine cobalt_machine;
+
+struct cobalt_machine_cpudata {
+	unsigned int faults[32];
+};
+
+DECLARE_PER_CPU(struct cobalt_machine_cpudata, cobalt_machine_cpudata);
+
+struct cobalt_pipeline {
+#ifdef CONFIG_SMP
+	cpumask_t supported_cpus;
+#endif
+};
+
+int pipeline_init(void);
+
+int pipeline_late_init(void);
+
+void pipeline_cleanup(void);
+
+extern struct cobalt_pipeline cobalt_pipeline;
+
+#endif /* !_COBALT_KERNEL_DOVETAIL_MACHINE_H */
+++ linux-patched/include/xenomai/pipeline/irq.h	2022-03-21 12:58:31.989863608 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/include/xenomai/pipeline/tick.h	1970-01-01 01:00:00.000000000 +0100
+/*
+ * SPDX-License-Identifier: GPL-2.0
+ */
+
+#ifndef _COBALT_KERNEL_DOVETAIL_IRQ_H
+#define _COBALT_KERNEL_DOVETAIL_IRQ_H
+
+static inline void xnintr_init_proc(void)
+{
+	/* N/A */
+}
+
+static inline void xnintr_cleanup_proc(void)
+{
+	/* N/A */
+}
+
+static inline int xnintr_mount(void)
+{
+	/* N/A */
+	return 0;
+}
+
+#endif /* !_COBALT_KERNEL_DOVETAIL_IRQ_H */
+++ linux-patched/include/xenomai/pipeline/tick.h	2022-03-21 12:58:31.982863676 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/include/xenomai/pipeline/thread.h	1970-01-01 01:00:00.000000000 +0100
+/*
+ * SPDX-License-Identifier: GPL-2.0
+ */
+
+#ifndef _COBALT_KERNEL_IPIPE_TICK_H
+#define _COBALT_KERNEL_IPIPE_TICK_H
+
+int pipeline_install_tick_proxy(void);
+
+void pipeline_uninstall_tick_proxy(void);
+
+struct xnsched;
+
+bool pipeline_must_force_program_tick(struct xnsched *sched);
+
+#endif /* !_COBALT_KERNEL_IPIPE_TICK_H */
+++ linux-patched/include/xenomai/pipeline/thread.h	2022-03-21 12:58:31.974863754 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/include/xenomai/pipeline/inband_work.h	1970-01-01 01:00:00.000000000 +0100
+/*
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright (C) 2019 Philippe Gerum  <rpm@xenomai.org>
+ */
+
+#ifndef _COBALT_KERNEL_DOVETAIL_THREAD_H
+#define _COBALT_KERNEL_DOVETAIL_THREAD_H
+
+#include <linux/dovetail.h>
+
+struct xnthread;
+
+#define cobalt_threadinfo oob_thread_state
+
+static inline struct cobalt_threadinfo *pipeline_current(void)
+{
+	return dovetail_current_state();
+}
+
+static inline
+struct xnthread *pipeline_thread_from_task(struct task_struct *p)
+{
+	return dovetail_task_state(p)->thread;
+}
+
+#endif /* !_COBALT_KERNEL_DOVETAIL_THREAD_H */
+++ linux-patched/include/xenomai/pipeline/inband_work.h	2022-03-21 12:58:31.967863822 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/include/xenomai/pipeline/lock.h	1970-01-01 01:00:00.000000000 +0100
+/*
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright (C) 2020 Philippe Gerum  <rpm@xenomai.org>
+ */
+
+#ifndef _COBALT_KERNEL_DOVETAIL_INBAND_WORK_H
+#define _COBALT_KERNEL_DOVETAIL_INBAND_WORK_H
+
+#include <linux/irq_work.h>
+
+/*
+ * This field must be named inband_work and appear first in the
+ * container work struct.
+ */
+struct pipeline_inband_work {
+	struct irq_work work;
+};
+
+#define PIPELINE_INBAND_WORK_INITIALIZER(__work, __handler)		\
+	{								\
+		.work = IRQ_WORK_INIT((void (*)(struct irq_work *))__handler), \
+	}
+
+#define pipeline_post_inband_work(__work)				\
+			irq_work_queue(&(__work)->inband_work.work)
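+
+/*
+ * Usage sketch (hypothetical container type and handler; as stated
+ * above, the field must come first and be named inband_work):
+ *
+ *   static void relay_handler(struct pipeline_inband_work *inband_work);
+ *
+ *   struct relay_work {
+ *           struct pipeline_inband_work inband_work;
+ *           int payload;
+ *   };
+ *
+ *   struct relay_work w = {
+ *           .inband_work = PIPELINE_INBAND_WORK_INITIALIZER(w, relay_handler),
+ *   };
+ *   pipeline_post_inband_work(&w);   relay_handler() then runs in-band
+ */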
+
+#endif /* !_COBALT_KERNEL_DOVETAIL_INBAND_WORK_H */
+++ linux-patched/include/xenomai/pipeline/lock.h	2022-03-21 12:58:31.960863890 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/include/xenomai/pipeline/pipeline.h	1970-01-01 01:00:00.000000000 +0100
+/*
+ * SPDX-License-Identifier: GPL-2.0
+ */
+
+#ifndef _COBALT_KERNEL_DOVETAIL_LOCK_H
+#define _COBALT_KERNEL_DOVETAIL_LOCK_H
+
+#include <linux/spinlock.h>
+
+typedef hard_spinlock_t pipeline_spinlock_t;
+
+#define PIPELINE_SPIN_LOCK_UNLOCKED(__name)  __HARD_SPIN_LOCK_INITIALIZER(__name)
+
+#ifdef CONFIG_XENO_OPT_DEBUG_LOCKING
+/* Disable UP-over-SMP kernel optimization in debug mode. */
+#define __locking_active__  1
+
+#else
+
+#ifdef CONFIG_SMP
+#define __locking_active__  1
+#else
+#define __locking_active__  IS_ENABLED(CONFIG_SMP)
+#endif
+
+#endif
+
+#endif /* !_COBALT_KERNEL_DOVETAIL_LOCK_H */
+++ linux-patched/include/xenomai/pipeline/pipeline.h	2022-03-21 12:58:31.952863968 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/include/xenomai/pipeline/trace.h	1970-01-01 01:00:00.000000000 +0100
+/*
+ * SPDX-License-Identifier: GPL-2.0
+ */
+
+#ifndef _COBALT_KERNEL_DOVETAIL_PIPELINE_H
+#define _COBALT_KERNEL_DOVETAIL_PIPELINE_H
+
+#include <linux/irq_pipeline.h>
+#include <linux/cpumask.h>
+#include <cobalt/kernel/assert.h>
+#include <asm/xenomai/features.h>
+#include <asm/xenomai/syscall.h>
+#include <asm/syscall.h>
+#include <pipeline/machine.h>
+
+typedef unsigned long spl_t;
+
+/*
+ * We only keep the LSB when testing in SMP mode in order to strip off
+ * the recursion marker (0x2) the nklock may store there.
+ */
+#define splhigh(x)  ((x) = oob_irq_save() & 1)
+#ifdef CONFIG_SMP
+#define splexit(x)  oob_irq_restore(x & 1)
+#else /* !CONFIG_SMP */
+#define splexit(x)  oob_irq_restore(x)
+#endif /* !CONFIG_SMP */
+#define splmax()    oob_irq_disable()
+#define splnone()   oob_irq_enable()
+#define spltest()   oob_irqs_disabled()
+
+#define is_secondary_domain()	running_inband()
+#define is_primary_domain()	running_oob()
+
+#ifdef CONFIG_SMP
+
+irqreturn_t pipeline_reschedule_ipi_handler(int irq, void *dev_id);
+
+static inline int pipeline_request_resched_ipi(void (*handler)(void))
+{
+	if (num_possible_cpus() == 1)
+		return 0;
+
+	/* Trap the out-of-band rescheduling interrupt. */
+	return __request_percpu_irq(RESCHEDULE_OOB_IPI,
+			pipeline_reschedule_ipi_handler,
+			IRQF_OOB,
+			"Xenomai reschedule",
+			&cobalt_machine_cpudata);
+}
+
+static inline void pipeline_free_resched_ipi(void)
+{
+	if (num_possible_cpus() > 1)
+		/* Release the out-of-band rescheduling interrupt. */
+		free_percpu_irq(RESCHEDULE_OOB_IPI, &cobalt_machine_cpudata);
+}
+
+static inline void pipeline_send_resched_ipi(const struct cpumask *dest)
+{
+	/*
+	 * Trigger the out-of-band rescheduling interrupt on remote
+	 * CPU(s).
+	 */
+	irq_send_oob_ipi(RESCHEDULE_OOB_IPI, dest);
+}
+
+static inline void pipeline_send_timer_ipi(const struct cpumask *dest)
+{
+	/*
+	 * Trigger the out-of-band timer interrupt on remote CPU(s).
+	 */
+	irq_send_oob_ipi(TIMER_OOB_IPI, dest);
+}
+
+#else  /* !CONFIG_SMP */
+
+static inline int pipeline_request_resched_ipi(void (*handler)(void))
+{
+	return 0;
+}
+
+static inline void pipeline_free_resched_ipi(void)
+{
+}
+
+#endif	/* CONFIG_SMP */
+
+static inline void pipeline_prepare_panic(void)
+{
+	/* N/A */
+}
+
+static inline void pipeline_collect_features(struct cobalt_featinfo *f)
+{
+	f->clock_freq = 0;	/* N/A */
+}
+
+#ifndef pipeline_get_syscall_args
+static inline void pipeline_get_syscall_args(struct task_struct *task,
+					     struct pt_regs *regs,
+					     unsigned long *args)
+{
+	syscall_get_arguments(task, regs, args);
+}
+#endif	/* !pipeline_get_syscall_args */
+
+#endif /* !_COBALT_KERNEL_DOVETAIL_PIPELINE_H */
+++ linux-patched/include/xenomai/pipeline/trace.h	2022-03-21 12:58:31.945864037 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/include/xenomai/pipeline/clock.h	1970-01-01 01:00:00.000000000 +0100
+/*
+ * Copyright (C) 2006 Jan Kiszka <jan.kiszka@web.de>.
+ *
+ * Xenomai is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published
+ * by the Free Software Foundation; either version 2 of the License,
+ * or (at your option) any later version.
+ *
+ * Xenomai is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Xenomai; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA
+ * 02111-1307, USA.
+ */
+#ifndef _COBALT_KERNEL_DOVETAIL_TRACE_H
+#define _COBALT_KERNEL_DOVETAIL_TRACE_H
+
+#include <linux/types.h>
+#include <linux/kconfig.h>
+#include <cobalt/uapi/kernel/trace.h>
+#include <trace/events/cobalt-core.h>
+#include <cobalt/kernel/assert.h>
+
+static inline int xntrace_max_begin(unsigned long v)
+{
+	TODO();
+	return 0;
+}
+
+static inline int xntrace_max_end(unsigned long v)
+{
+	TODO();
+	return 0;
+}
+
+static inline int xntrace_max_reset(void)
+{
+	TODO();
+	return 0;
+}
+
+static inline int xntrace_user_start(void)
+{
+	TODO();
+	return 0;
+}
+
+static inline int xntrace_user_stop(unsigned long v)
+{
+	TODO();
+	return 0;
+}
+
+static inline int xntrace_user_freeze(unsigned long v, int once)
+{
+	trace_cobalt_trace_longval(0, v);
+	trace_cobalt_trigger("user-freeze");
+	return 0;
+}
+
+static inline void xntrace_latpeak_freeze(int delay)
+{
+	trace_cobalt_latpeak(delay);
+	trace_cobalt_trigger("latency-freeze");
+}
+
+static inline int xntrace_special(unsigned char id, unsigned long v)
+{
+	trace_cobalt_trace_longval(id, v);
+	return 0;
+}
+
+static inline int xntrace_special_u64(unsigned char id,
+				unsigned long long v)
+{
+	trace_cobalt_trace_longval(id, v);
+	return 0;
+}
+
+static inline int xntrace_pid(pid_t pid, short prio)
+{
+	trace_cobalt_trace_pid(pid, prio);
+	return 0;
+}
+
+static inline int xntrace_tick(unsigned long delay_ticks) /* ns */
+{
+	trace_cobalt_tick_shot(delay_ticks);
+	return 0;
+}
+
+static inline int xntrace_panic_freeze(void)
+{
+	TODO();
+	return 0;
+}
+
+static inline int xntrace_panic_dump(void)
+{
+	TODO();
+	return 0;
+}
+
+static inline bool xntrace_enabled(void)
+{
+	return IS_ENABLED(CONFIG_DOVETAIL_TRACE);
+}
+
+#endif /* !_COBALT_KERNEL_DOVETAIL_TRACE_H */
+++ linux-patched/include/xenomai/pipeline/clock.h	2022-03-21 12:58:31.938864105 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/include/xenomai/rtdm/uapi/ipc.h	1970-01-01 01:00:00.000000000 +0100
+/*
+ * SPDX-License-Identifier: GPL-2.0
+ */
+
+#ifndef _COBALT_KERNEL_DOVETAIL_CLOCK_H
+#define _COBALT_KERNEL_DOVETAIL_CLOCK_H
+
+#include <cobalt/uapi/kernel/types.h>
+#include <cobalt/kernel/assert.h>
+#include <linux/ktime.h>
+#include <linux/errno.h>
+
+struct timespec64;
+
+static inline u64 pipeline_read_cycle_counter(void)
+{
+	/*
+	 * With Dovetail, our idea of time is directly based on a
+	 * refined count of nanoseconds since the epoch, the hardware
+	 * time counter is transparent to us. For this reason,
+	 * xnclock_ticks_to_ns() and xnclock_ns_to_ticks() are
+	 * idempotent when building for Dovetail.
+	 */
+	return ktime_get_mono_fast_ns();
+}
+
+static inline xnticks_t pipeline_read_wallclock(void)
+{
+	return ktime_get_real_fast_ns();
+}
+
+static inline int pipeline_set_wallclock(xnticks_t epoch_ns)
+{
+	return -EOPNOTSUPP;
+}
+
+void pipeline_set_timer_shot(unsigned long cycles);
+
+const char *pipeline_timer_name(void);
+
+static inline const char *pipeline_clock_name(void)
+{
+	/* Return the name of the current clock source. */
+	TODO();
+
+	return "?";
+}
+
+static inline int pipeline_get_host_time(struct timespec64 *tp)
+{
+	/* Convert ktime_get_real_fast_ns() to timespec. */
+	*tp = ktime_to_timespec64(ktime_get_real_fast_ns());
+
+	return 0;
+}
+
+static inline void pipeline_init_clock(void)
+{
+	/* N/A */
+}
+
+static inline xnsticks_t xnclock_core_ticks_to_ns(xnsticks_t ticks)
+{
+	return ticks;
+}
+
+static inline xnsticks_t xnclock_core_ticks_to_ns_rounded(xnsticks_t ticks)
+{
+	return ticks;
+}
+
+static inline xnsticks_t xnclock_core_ns_to_ticks(xnsticks_t ns)
+{
+	return ns;
+}
+
+#endif /* !_COBALT_KERNEL_DOVETAIL_CLOCK_H */
+++ linux-patched/include/xenomai/rtdm/uapi/ipc.h	2022-03-21 12:58:32.298860595 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/include/xenomai/rtdm/uapi/udd.h	1970-01-01 01:00:00.000000000 +0100
+/**
+ * @file
+ * This file is part of the Xenomai project.
+ *
+ * @note Copyright (C) 2009 Philippe Gerum <rpm@xenomai.org>
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA.
+ */
+
+#ifndef _RTDM_UAPI_IPC_H
+#define _RTDM_UAPI_IPC_H
+
+/**
+ * @ingroup rtdm_profiles
+ * @defgroup rtdm_ipc Real-time IPC
+ *
+ * @b Profile @b Revision: 1
+ * @n
+ * @n
+ * @par Device Characteristics
+ * @n
+ * @ref rtdm_driver_flags "Device Flags": @c RTDM_PROTOCOL_DEVICE @n
+ * @n
+ * @ref rtdm_driver.protocol_family "Protocol Family": @c PF_RTIPC @n
+ * @n
+ * @ref rtdm_driver.socket_type "Socket Type": @c SOCK_DGRAM @n
+ * @n
+ * @ref rtdm_driver_profile "Device Class": @c RTDM_CLASS_RTIPC @n
+ * @n
+ * @{
+ *
+ * @anchor rtipc_operations @name Supported operations
+ * Standard socket operations supported by the RTIPC protocols.
+ * @{
+ */
+
+/** Create an endpoint for communication in the AF_RTIPC domain.
+ *
+ * @param[in] domain The communication domain. Must be AF_RTIPC.
+ *
+ * @param[in] type The socket type. Must be SOCK_DGRAM.
+ *
+ * @param [in] protocol Any of @ref IPCPROTO_XDDP, @ref IPCPROTO_IDDP,
+ * or @ref IPCPROTO_BUFP. @ref IPCPROTO_IPC is also valid, and refers
+ * to the default RTIPC protocol, namely @ref IPCPROTO_IDDP.
+ *
+ * @return In addition to the standard error codes for @c socket(2),
+ * the following specific error code may be returned:
+ * - -ENOPROTOOPT (Protocol is known, but not compiled in the RTIPC driver).
+ *   See @ref RTIPC_PROTO "RTIPC protocols"
+ *   for available protocols.
+ *
+ * @par Calling context:
+ * non-RT
+ */
+#ifdef DOXYGEN_CPP
+int socket__AF_RTIPC(int domain =AF_RTIPC, int type =SOCK_DGRAM, int protocol);
+#endif
+
+/**
+ * Close a RTIPC socket descriptor.
+ *
+ * Blocking calls to any of the @ref sendmsg__AF_RTIPC "sendmsg" or @ref
+ * recvmsg__AF_RTIPC "recvmsg" functions will be unblocked when the socket
+ * is closed and return with an error.
+ *
+ * @param[in] sockfd The socket descriptor to close.
+ *
+ * @return In addition to the standard error codes for @c close(2),
+ * the following specific error code may be returned:
+ * none
+ *
+ * @par Calling context:
+ * non-RT
+ */
+#ifdef DOXYGEN_CPP
+int close__AF_RTIPC(int sockfd);
+#endif
+
+/**
+ * Bind a RTIPC socket to a port.
+ *
+ * Bind the socket to a destination port.
+ *
+ * @param[in] sockfd The RTDM file descriptor obtained from the socket
+ * creation call.
+ *
+ * @param [in] addr The address to bind the socket to (see struct
+ * sockaddr_ipc). The meaning of such address depends on the RTIPC
+ * protocol in use for the socket:
+ *
+ * - IPCPROTO_XDDP
+ *
+ *   This action creates an endpoint for channelling traffic between
+ *   the Xenomai and Linux domains.
+ *
+ *   @em sipc_family must be AF_RTIPC, @em sipc_port is either -1,
+ *   or a valid free port number between 0 and
+ *   CONFIG_XENO_OPT_PIPE_NRDEV-1.
+ *
+ *   If @em sipc_port is -1, a free port will be assigned automatically.
+ *
+ *   Upon success, the pseudo-device /dev/rtp@em N will be reserved
+ *   for this communication channel, where @em N is the assigned port
+ *   number. The non real-time side shall open this device to exchange
+ *   data over the bound socket.
+ *
+ * @anchor xddp_label_binding
+ *   If a label was assigned (see @ref XDDP_LABEL) prior to
+ *   binding the socket to a port, a registry link referring to the
+ *   created pseudo-device will be automatically set up as
+ *   @c /proc/xenomai/registry/rtipc/xddp/@em label, where @em label is the
+ *   label string passed to setsockopt() for the @ref XDDP_LABEL option.
+ *
+ * - IPCPROTO_IDDP
+ *
+ *   This action creates an endpoint for exchanging datagrams within
+ *   the Xenomai domain.
+ *
+ *   @em sipc_family must be AF_RTIPC, @em sipc_port is either -1,
+ *   or a valid free port number between 0 and
+ *   CONFIG_XENO_OPT_IDDP_NRPORT-1.
+ *
+ *   If @em sipc_port is -1, a free port will be assigned
+ *   automatically. The real-time peer shall connect to the same port
+ *   for exchanging data over the bound socket.
+ *
+ * @anchor iddp_label_binding
+ *   If a label was assigned (see @ref IDDP_LABEL) prior to binding
+ *   the socket to a port, a registry link referring to the assigned
+ *   port number will be automatically set up as @c
+ *   /proc/xenomai/registry/rtipc/iddp/@em label, where @em label is
+ *   the label string passed to setsockopt() for the @ref IDDP_LABEL
+ *   option.
+ *
+ * - IPCPROTO_BUFP
+ *
+ *   This action creates an endpoint for a one-way byte
+ *   stream within the Xenomai domain.
+ *
+ *   @em sipc_family must be AF_RTIPC, @em sipc_port is either -1,
+ *   or a valid free port number between 0 and CONFIG_XENO_OPT_BUFP_NRPORT-1.
+ *
+ *   If @em sipc_port is -1, an available port will be assigned
+ *   automatically. The real-time peer shall connect to the same port
+ *   for exchanging data over the bound socket.
+ *
+ * @anchor bufp_label_binding
+ *   If a label was assigned (see @ref BUFP_LABEL) prior to binding
+ *   the socket to a port, a registry link referring to the assigned
+ *   port number will be automatically set up as @c
+ *   /proc/xenomai/registry/rtipc/bufp/@em label, where @em label is
+ *   the label string passed to setsockopt() for the @a BUFP_LABEL
+ *   option.
+ *
+ * @param[in] addrlen The size in bytes of the structure pointed to by
+ * @a addr.
+ *
+ * @return In addition to the standard error codes for @c
+ * bind(2), the following specific error code may be returned:
+ *   - -EFAULT (Invalid data address given)
+ *   - -ENOMEM (Not enough memory)
+ *   - -EINVAL (Invalid parameter)
+ *   - -EADDRINUSE (Socket already bound to a port, or no port available)
+ *   - -EAGAIN (no registry slot available, check/raise
+ *     CONFIG_XENO_OPT_REGISTRY_NRSLOTS).
+ *
+ * @par Calling context:
+ * non-RT
+ */
+#ifdef DOXYGEN_CPP
+int bind__AF_RTIPC(int sockfd, const struct sockaddr_ipc *addr,
+		   socklen_t addrlen);
+#endif
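+
+/*
+ * Minimal usage sketch for the calls above (illustrative only, error
+ * handling omitted): create an XDDP endpoint and bind it to any free
+ * port, letting the core pick the pseudo-device minor.
+ *
+ *   struct sockaddr_ipc saddr = {
+ *           .sipc_family = AF_RTIPC,
+ *           .sipc_port = -1,
+ *   };
+ *   int s = socket(AF_RTIPC, SOCK_DGRAM, IPCPROTO_XDDP);
+ *   bind(s, (struct sockaddr *)&saddr, sizeof(saddr));
+ */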
+
+/**
+ * Initiate a connection on a RTIPC socket.
+ *
+ * @param[in] sockfd The RTDM file descriptor obtained from the socket
+ * creation call.
+ *
+ * @param [in] addr The address to connect the socket to (see struct
+ * sockaddr_ipc).
+ *
+ * - If sipc_port is a valid port for the protocol, it is used
+ * verbatim and the connection succeeds immediately, regardless of
+ * whether the destination is bound at the time of the call.
+ *
+ * - If sipc_port is -1 and a label was assigned to the socket,
+ * connect() blocks for the requested amount of time (see @ref
+ * SO_RCVTIMEO) until a socket is bound to the same label via @c
+ * bind(2) (see @ref XDDP_LABEL, @ref IDDP_LABEL, @ref BUFP_LABEL), in
+ * which case a connection is established between both endpoints.
+ *
+ * - If sipc_port is -1 and no label was assigned to the socket, the
+ * default destination address is cleared, meaning that any subsequent
+ * write to the socket will return -EDESTADDRREQ, until a valid
+ * destination address is set via @c connect(2) or @c bind(2).
+ *
+ * @param[in] addrlen The size in bytes of the structure pointed to by
+ * @a addr.
+ *
+ * @return In addition to the standard error codes for @c connect(2),
+ * the following specific error code may be returned:
+ * none.
+ *
+ * @par Calling context:
+ * RT/non-RT
+ */
+#ifdef DOXYGEN_CPP
+int connect__AF_RTIPC(int sockfd, const struct sockaddr_ipc *addr,
+		      socklen_t addrlen);
+#endif
+
+/**
+ * Set options on RTIPC sockets.
+ *
+ * These functions allow setting various socket options.
+ * Supported Levels and Options:
+ *
+ * - Level @ref sockopts_socket "SOL_SOCKET"
+ * - Level @ref sockopts_xddp "SOL_XDDP"
+ * - Level @ref sockopts_iddp "SOL_IDDP"
+ * - Level @ref sockopts_bufp "SOL_BUFP"
+ * .
+ *
+ * @return In addition to the standard error codes for @c
+ * setsockopt(2), the following specific error code may
+ * be returned:
+ * follow the option links above.
+ *
+ * @par Calling context:
+ * non-RT
+ */
+#ifdef DOXYGEN_CPP
+int setsockopt__AF_RTIPC(int sockfd, int level, int optname,
+			 const void *optval, socklen_t optlen);
+#endif
+/**
+ * Get options on RTIPC sockets.
+ *
+ * These functions allow getting various socket options.
+ * Supported Levels and Options:
+ *
+ * - Level @ref sockopts_socket "SOL_SOCKET"
+ * - Level @ref sockopts_xddp "SOL_XDDP"
+ * - Level @ref sockopts_iddp "SOL_IDDP"
+ * - Level @ref sockopts_bufp "SOL_BUFP"
+ * .
+ *
+ * @return In addition to the standard error codes for @c
+ * getsockopt(2), the following specific error code may
+ * be returned:
+ * follow the option links above.
+ *
+ * @par Calling context:
+ * RT/non-RT
+ */
+#ifdef DOXYGEN_CPP
+int getsockopt__AF_RTIPC(int sockfd, int level, int optname,
+			 void *optval, socklen_t *optlen);
+#endif
+
+/**
+ * Send a message on a RTIPC socket.
+ *
+ * @param[in] sockfd The RTDM file descriptor obtained from the socket
+ * creation call.
+ *
+ * @param[in] msg The address of the message header conveying the
+ * datagram.
+ *
+ * @param [in] flags Operation flags:
+ *
+ * - MSG_OOB Send out-of-band message.  For all RTIPC protocols except
+ *   @ref IPCPROTO_BUFP, sending out-of-band data actually means
+ *   pushing them to the head of the receiving queue, so that the
+ *   reader will always receive them before normal messages. @ref
+ *   IPCPROTO_BUFP does not support out-of-band sending.
+ *
+ * - MSG_DONTWAIT Non-blocking I/O operation. The caller will not be
+ *   blocked whenever the message cannot be sent immediately at the
+ *   time of the call (e.g. memory shortage), but will rather return
+ *   with -EWOULDBLOCK. Unlike other RTIPC protocols, @ref
+ *   IPCPROTO_XDDP accepts but never considers MSG_DONTWAIT since
+ *   writing to a real-time XDDP endpoint is inherently a non-blocking
+ *   operation.
+ *
+ * - MSG_MORE Accumulate data before sending. This flag is accepted by
+ *   the @ref IPCPROTO_XDDP protocol only, and tells the send service
+ *   to accumulate the outgoing data into an internal streaming
+ *   buffer, instead of issuing a datagram immediately for it. See
+ *   @ref XDDP_BUFSZ for more.
+ *
+ * @note No RTIPC protocol allows for short writes, and only complete
+ * messages are sent to the peer.
+ *
+ * @return In addition to the standard error codes for @c sendmsg(2),
+ * the following specific error code may be returned:
+ * none.
+ *
+ * @par Calling context:
+ * RT
+ */
+#ifdef DOXYGEN_CPP
+ssize_t sendmsg__AF_RTIPC(int sockfd, const struct msghdr *msg, int flags);
+#endif
+
+/**
+ * Receive a message from a RTIPC socket.
+ *
+ * @param[in] sockfd The RTDM file descriptor obtained from the socket
+ * creation call.
+ *
+ * @param[out] msg The address the message header will be copied at.
+ *
+ * @param [in] flags Operation flags:
+ *
+ * - MSG_DONTWAIT Non-blocking I/O operation. The caller will not be
+ *   blocked whenever no message is immediately available for receipt
+ *   at the time of the call, but will rather return with
+ *   -EWOULDBLOCK.
+ *
+ * @note @ref IPCPROTO_BUFP does not allow for short reads and always
+ * returns the requested number of bytes, except in one situation:
+ * whenever a writer is blocked waiting to send on a full buffer,
+ * while the caller would have to wait to receive a complete
+ * message.  This is usually the sign of a pathological use
+ * of the BUFP socket, like defining an incorrect buffer size via @ref
+ * BUFP_BUFSZ. In that case, a short read is allowed to prevent a
+ * deadlock.
+ *
+ * @return In addition to the standard error codes for @c recvmsg(2),
+ * the following specific error code may be returned:
+ * none.
+ *
+ * @par Calling context:
+ * RT
+ */
+#ifdef DOXYGEN_CPP
+ssize_t recvmsg__AF_RTIPC(int sockfd, struct msghdr *msg, int flags);
+#endif
+
+/**
+ * Get socket name.
+ *
+ * The name of the local endpoint for the socket is copied back (see
+ * struct sockaddr_ipc).
+ *
+ * @return In addition to the standard error codes for @c getsockname(2),
+ * the following specific error code may be returned:
+ * none.
+ *
+ * @par Calling context:
+ * RT/non-RT
+ */
+#ifdef DOXYGEN_CPP
+int getsockname__AF_RTIPC(int sockfd, struct sockaddr_ipc *addr, socklen_t *addrlen);
+#endif
+
+/**
+ * Get socket peer.
+ *
+ * The name of the remote endpoint for the socket is copied back (see
+ * struct sockaddr_ipc). This is the default destination address for
+ * messages sent on the socket. It can be set either explicitly via @c
+ * connect(2), or implicitly via @c bind(2) if no @c connect(2) was
+ * called prior to binding the socket to a port, in which case both
+ * the local and remote names are equal.
+ *
+ * @return In addition to the standard error codes for @c getpeername(2),
+ * the following specific error code may be returned:
+ * none.
+ *
+ * @par Calling context:
+ * RT/non-RT
+ */
+#ifdef DOXYGEN_CPP
+int getpeername__AF_RTIPC(int sockfd, struct sockaddr_ipc *addr, socklen_t *addrlen);
+#endif
+
+/** @} */
+
+#include <cobalt/uapi/kernel/types.h>
+#include <cobalt/uapi/kernel/pipe.h>
+#include <rtdm/rtdm.h>
+
+/* Address family */
+#define AF_RTIPC		111
+
+/* Protocol family */
+#define PF_RTIPC		AF_RTIPC
+
+/**
+ * @anchor RTIPC_PROTO @name RTIPC protocol list
+ * protocols for the PF_RTIPC protocol family
+ *
+ * @{ */
+enum {
+/** Default protocol (IDDP) */
+	IPCPROTO_IPC  = 0,
+/**
+ * Cross-domain datagram protocol (RT <-> non-RT).
+ *
+ * Real-time Xenomai threads and regular Linux threads may want to
+ * exchange data in a way that does not require the former to leave
+ * the real-time domain (i.e. primary mode). The RTDM-based XDDP
+ * protocol is available for this purpose.
+ *
+ * On the Linux domain side, pseudo-device files named /dev/rtp@em \<minor\>
+ * give regular POSIX threads access to non real-time communication
+ * endpoints, via the standard character-based I/O interface. On the
+ * Xenomai domain side, sockets may be bound to XDDP ports, which act
+ * as proxies to send and receive data to/from the associated
+ * pseudo-device files. Ports and pseudo-device minor numbers are
+ * paired, meaning that e.g. socket port 7 will proxy the traffic to/from
+ * /dev/rtp7.
+ *
+ * All data sent through a bound/connected XDDP socket via @c
+ * sendto(2) or @c write(2) will be passed to the peer endpoint in the
+ * Linux domain, and made available for reading via the standard @c
+ * read(2) system call. Conversely, all data sent using @c write(2)
+ * through the non real-time endpoint will be conveyed to the
+ * real-time socket endpoint, and made available to the @c recvfrom(2)
+ * or @c read(2) system calls.
+ */
+	IPCPROTO_XDDP = 1,
+/**
+ * Intra-domain datagram protocol (RT <-> RT).
+ *
+ * The RTDM-based IDDP protocol enables real-time threads to exchange
+ * datagrams within the Xenomai domain, via socket endpoints.
+ */
+	IPCPROTO_IDDP = 2,
+/**
+ * Buffer protocol (RT <-> RT, byte-oriented).
+ *
+ * The RTDM-based BUFP protocol implements a lightweight,
+ * byte-oriented, one-way Producer-Consumer data path. All messages
+ * written are buffered into a single memory area in strict FIFO
+ * order, until read by the consumer.
+ *
+ * This protocol always prevents short writes, and only allows short
+ * reads when a potential deadlock situation arises (i.e. readers and
+ * writers waiting for each other indefinitely).
+ */
+	IPCPROTO_BUFP = 3,
+	IPCPROTO_MAX
+};
+/** @} */
+
+/**
+ * Port number type for the RTIPC address family.
+ */
+typedef int16_t rtipc_port_t;
+
+/**
+ * Port label information structure.
+ */
+struct rtipc_port_label {
+	/** Port label string, null-terminated. */
+	char label[XNOBJECT_NAME_LEN];
+};
+
+/**
+ * Socket address structure for the RTIPC address family.
+ */
+struct sockaddr_ipc {
+	/** RTIPC address family, must be @c AF_RTIPC */
+	sa_family_t sipc_family;
+	/** Port number. */
+	rtipc_port_t sipc_port;
+};
+
+#define SOL_XDDP		311
+/**
+ * @anchor sockopts_xddp @name XDDP socket options
+ * Setting and getting XDDP socket options.
+ * @{ */
+/**
+ * XDDP label assignment
+ *
+ * ASCII label strings can be attached to XDDP ports, so that opening
+ * the non-RT endpoint can be done by specifying this symbolic device
+ * name rather than referring to a raw pseudo-device entry
+ * (i.e. /dev/rtp@em N).
+ *
+ * When available, this label will be registered when binding, in
+ * addition to the port number (see @ref xddp_label_binding
+ * "XDDP port binding").
+ *
+ * It is not allowed to assign a label after the socket was
+ * bound. However, multiple assignment calls are allowed prior to the
+ * binding; the last label set will be used.
+ *
+ * @param [in] level @ref sockopts_xddp "SOL_XDDP"
+ * @param [in] optname @b XDDP_LABEL
+ * @param [in] optval Pointer to struct rtipc_port_label
+ * @param [in] optlen sizeof(struct rtipc_port_label)
+ *
+ * @return 0 is returned upon success. Otherwise:
+ *
+ * - -EFAULT (Invalid data address given)
+ * - -EALREADY (socket already bound)
+ * - -EINVAL (@a optlen invalid)
+ * .
+ *
+ * @par Calling context:
+ * RT/non-RT
+ */
+#define XDDP_LABEL		1
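+
+/*
+ * Sketch (the label string is illustrative): attach a label before
+ * binding, so the non-RT side can locate the pseudo-device through
+ * /proc/xenomai/registry/rtipc/xddp/echo instead of a raw minor.
+ *
+ *   struct rtipc_port_label plabel;
+ *   strcpy(plabel.label, "echo");
+ *   setsockopt(s, SOL_XDDP, XDDP_LABEL, &plabel, sizeof(plabel));
+ *   ... then bind() as shown earlier, e.g. with sipc_port == -1 ...
+ */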
+/**
+ * XDDP local pool size configuration
+ *
+ * By default, the memory needed to convey the data is pulled from
+ * Xenomai's system pool. Setting a local pool size overrides this
+ * default for the socket.
+ *
+ * If a non-zero size was configured, a local pool is allocated at
+ * binding time. This pool will provide storage for pending datagrams.
+ *
+ * It is not allowed to configure a local pool size after the socket
+ * was bound. However, multiple configuration calls are allowed prior
+ * to the binding; the last value set will be used.
+ *
+ * @note: the pool memory is obtained from the host allocator by the
+ * @ref bind__AF_RTIPC "bind call".
+ *
+ * @param [in] level @ref sockopts_xddp "SOL_XDDP"
+ * @param [in] optname @b XDDP_POOLSZ
+ * @param [in] optval Pointer to a variable of type size_t, containing
+ * the required size of the local pool to reserve at binding time
+ * @param [in] optlen sizeof(size_t)
+ *
+ * @return 0 is returned upon success. Otherwise:
+ *
+ * - -EFAULT (Invalid data address given)
+ * - -EALREADY (socket already bound)
+ * - -EINVAL (@a optlen invalid or *@a optval is zero)
+ * .
+ *
+ * @par Calling context:
+ * RT/non-RT
+ */
+#define XDDP_POOLSZ		2
+/**
+ * XDDP streaming buffer size configuration
+ *
+ * In addition to sending datagrams, real-time threads may stream data
+ * in a byte-oriented mode through the port as well. This increases
+ * the bandwidth and reduces the overhead, when the overall data to
+ * send to the Linux domain is collected piecemeal, and keeping the
+ * message boundaries is not required.
+ *
+ * This feature is enabled when a non-zero buffer size is set for the
+ * socket. In that case, the real-time data accumulates into the
+ * streaming buffer when MSG_MORE is passed to any of the @ref
+ * sendmsg__AF_RTIPC "send functions", until:
+ *
+ * - the receiver from the Linux domain wakes up and consumes it,
+ * - a different source port attempts to send data to the same
+ *   destination port,
+ * - MSG_MORE is absent from the send flags,
+ * - the buffer is full,
+ * .
+ * whichever comes first.
+ *
+ * Setting *@a optval to zero disables the streaming buffer, in which
+ * case all outgoing data is conveyed in separate datagrams, regardless of
+ * MSG_MORE.
+ *
+ * @note only a single streaming buffer exists per socket. When this
+ * buffer is full, the real-time data stops accumulating and sending
+ * operations resume in mere datagram mode. Accumulation may happen
+ * again after some or all data in the streaming buffer is consumed
+ * from the Linux domain endpoint.
+ *
+ * The streaming buffer size may be adjusted multiple times during the
+ * socket lifetime; the latest configuration change will take effect
+ * when the accumulation resumes after the previous buffer was
+ * flushed.
+ *
+ * @param [in] level @ref sockopts_xddp "SOL_XDDP"
+ * @param [in] optname @b XDDP_BUFSZ
+ * @param [in] optval Pointer to a variable of type size_t, containing
+ * the required size of the streaming buffer
+ * @param [in] optlen sizeof(size_t)
+ *
+ * @return 0 is returned upon success. Otherwise:
+ *
+ * - -EFAULT (Invalid data address given)
+ * - -ENOMEM (Not enough memory)
+ * - -EINVAL (@a optlen is invalid)
+ * .
+ *
+ * @par Calling context:
+ * RT/non-RT
+ */
+#define XDDP_BUFSZ		3
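+
+/*
+ * Streaming sketch (illustrative size, error handling omitted): set a
+ * streaming buffer, then pass MSG_MORE to accumulate outgoing bytes
+ * until one of the flush conditions listed above is met.
+ *
+ *   size_t bufsz = 8192;
+ *   setsockopt(s, SOL_XDDP, XDDP_BUFSZ, &bufsz, sizeof(bufsz));
+ *   ...
+ *   send(s, chunk, chunklen, MSG_MORE);   accumulate
+ *   send(s, tail, taillen, 0);            flush as a single datagram
+ */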
+/**
+ * XDDP monitoring callback
+ *
+ * Other RTDM drivers may install a user-defined callback via the @ref
+ * rtdm_setsockopt call from the inter-driver API, in order to collect
+ * particular events occurring on the channel.
+ *
+ * This notification mechanism is particularly useful to monitor a
+ * channel asynchronously while performing other tasks.
+ *
+ * The user-provided routine will be passed the RTDM file descriptor
+ * of the socket receiving the event, the event code, and an optional
+ * argument.  Four events are currently defined, see @ref XDDP_EVENTS.
+ *
+ * The XDDP_EVTIN and XDDP_EVTOUT events are fired on behalf of a
+ * fully atomic context; therefore, care must be taken to keep their
+ * overhead low. In those cases, the Xenomai services that may be
+ * called from the callback are restricted to the set allowed to a
+ * real-time interrupt handler.
+ *
+ * @param [in] level @ref sockopts_xddp "SOL_XDDP"
+ * @param [in] optname @b XDDP_MONITOR
+ * @param [in] optval Pointer to a pointer to function of type int
+ *             (*)(int fd, int event, long arg), containing the address of the
+ *             user-defined callback. Passing a NULL callback pointer
+ *             in @a optval disables monitoring.
+ * @param [in] optlen sizeof(int (*)(int fd, int event, long arg))
+ *
+ * @return 0 is returned upon success. Otherwise:
+ *
+ * - -EFAULT (Invalid data address given)
+ * - -EPERM (Operation not allowed from user-space)
+ * - -EINVAL (@a optlen is invalid)
+ * .
+ *
+ * @par Calling context:
+ * RT/non-RT, kernel space only
+ */
+#define XDDP_MONITOR		4
+/** @} */
+
+/**
+ * @anchor XDDP_EVENTS @name XDDP events
+ * Specific events occurring on XDDP channels, which can be monitored
+ * via the @ref XDDP_MONITOR socket option.
+ *
+ * @{ */
+/**
+ * @ref XDDP_MONITOR "Monitor" writes to the non real-time endpoint.
+ *
+ * XDDP_EVTIN is sent when data is written to the non real-time
+ * endpoint the socket is bound to (i.e. via /dev/rtp@em N), which
+ * means that some input is pending for the real-time endpoint. The
+ * argument is the size of the incoming message.
+ */
+#define XDDP_EVTIN		1
+/**
+ * @ref XDDP_MONITOR "Monitor" reads from the non real-time endpoint.
+ *
+ * XDDP_EVTOUT is sent when the non real-time endpoint successfully
+ * reads a complete message (i.e. via /dev/rtp@em N). The argument is
+ * the size of the outgoing message.
+ */
+#define XDDP_EVTOUT		2
+/**
+ * @ref XDDP_MONITOR "Monitor" close from the non real-time endpoint.
+ *
+ * XDDP_EVTDOWN is sent when the non real-time endpoint is closed. The
+ * argument is always 0.
+ */
+#define XDDP_EVTDOWN		3
+/**
+ * @ref XDDP_MONITOR "Monitor" memory shortage for non real-time
+ * datagrams.
+ *
+ * XDDP_EVTNOBUF is sent when no memory is available from the pool to
+ * hold the message currently sent from the non real-time
+ * endpoint. The argument is the size of the failed allocation. Upon
+ * return from the callback, the caller will block and retry until
+ * enough space is available from the pool; during that process, the
+ * callback might be invoked multiple times, each time a new attempt
+ * to get the required memory fails.
+ */
+#define XDDP_EVTNOBUF		4
+/** @} */
+
+#define SOL_IDDP		312
+/**
+ * @anchor sockopts_iddp @name IDDP socket options
+ * Setting and getting IDDP socket options.
+ * @{ */
+/**
+ * IDDP label assignment
+ *
+ * ASCII label strings can be attached to IDDP ports, in order to
+ * connect sockets to them in a more descriptive way than using plain
+ * numeric port values.
+ *
+ * When available, this label will be registered when binding, in
+ * addition to the port number (see @ref iddp_label_binding
+ * "IDDP port binding").
+ *
+ * It is not allowed to assign a label after the socket was
+ * bound. However, multiple assignment calls are allowed prior to the
+ * binding; the last label set will be used.
+ *
+ * @param [in] level @ref sockopts_iddp "SOL_IDDP"
+ * @param [in] optname @b IDDP_LABEL
+ * @param [in] optval Pointer to struct rtipc_port_label
+ * @param [in] optlen sizeof(struct rtipc_port_label)
+ *
+ * @return 0 is returned upon success. Otherwise:
+ *
+ * - -EFAULT (Invalid data address given)
+ * - -EALREADY (socket already bound)
+ * - -EINVAL (@a optlen is invalid)
+ * .
+ *
+ * @par Calling context:
+ * RT/non-RT
+ */
+#define IDDP_LABEL		1
+/**
+ * IDDP local pool size configuration
+ *
+ * By default, the memory needed to convey the data is pulled from
+ * Xenomai's system pool. Setting a local pool size overrides this
+ * default for the socket.
+ *
+ * If a non-zero size was configured, a local pool is allocated at
+ * binding time. This pool will provide storage for pending datagrams.
+ *
+ * It is not allowed to configure a local pool size after the socket
+ * was bound. However, multiple configuration calls are allowed prior
+ * to the binding; the last value set will be used.
+ *
+ * @note: the pool memory is obtained from the host allocator by the
+ * @ref bind__AF_RTIPC "bind call".
+ *
+ * @param [in] level @ref sockopts_iddp "SOL_IDDP"
+ * @param [in] optname @b IDDP_POOLSZ
+ * @param [in] optval Pointer to a variable of type size_t, containing
+ * the required size of the local pool to reserve at binding time
+ * @param [in] optlen sizeof(size_t)
+ *
+ * @return 0 is returned upon success. Otherwise:
+ *
+ * - -EFAULT (Invalid data address given)
+ * - -EALREADY (socket already bound)
+ * - -EINVAL (@a optlen is invalid or *@a optval is zero)
+ * .
+ *
+ * @par Calling context:
+ * RT/non-RT
+ */
+#define IDDP_POOLSZ		2
+/** @} */
+
+#define SOL_BUFP		313
+/**
+ * @anchor sockopts_bufp @name BUFP socket options
+ * Setting and getting BUFP socket options.
+ * @{ */
+/**
+ * BUFP label assignment
+ *
+ * ASCII label strings can be attached to BUFP ports, in order to
+ * connect sockets to them in a more descriptive way than using plain
+ * numeric port values.
+ *
+ * When available, this label will be registered when binding, in
+ * addition to the port number (see @ref bufp_label_binding
+ * "BUFP port binding").
+ *
+ * It is not allowed to assign a label after the socket was
+ * bound. However, multiple assignment calls are allowed prior to the
+ * binding; the last label set will be used.
+ *
+ * @param [in] level @ref sockopts_bufp "SOL_BUFP"
+ * @param [in] optname @b BUFP_LABEL
+ * @param [in] optval Pointer to struct rtipc_port_label
+ * @param [in] optlen sizeof(struct rtipc_port_label)
+ *
+ * @return 0 is returned upon success. Otherwise:
+ *
+ * - -EFAULT (Invalid data address given)
+ * - -EALREADY (socket already bound)
+ * - -EINVAL (@a optlen is invalid)
+ * .
+ *
+ * @par Calling context:
+ * RT/non-RT
+ */
+#define BUFP_LABEL		1
+/**
+ * BUFP buffer size configuration
+ *
+ * All messages written to a BUFP socket are buffered in a single
+ * per-socket memory area. Configuring the size of such buffer prior
+ * to binding the socket to a destination port is mandatory.
+ *
+ * It is not allowed to configure a buffer size after the socket was
+ * bound. However, multiple configuration calls are allowed prior to
+ * the binding; the last value set will be used.
+ *
+ * @note: the buffer memory is obtained from the host allocator by the
+ * @ref bind__AF_RTIPC "bind call".
+ *
+ * @param [in] level @ref sockopts_bufp "SOL_BUFP"
+ * @param [in] optname @b BUFP_BUFSZ
+ * @param [in] optval Pointer to a variable of type size_t, containing
+ * the required size of the buffer to reserve at binding time
+ * @param [in] optlen sizeof(size_t)
+ *
+ * @return 0 is returned upon success. Otherwise:
+ *
+ * - -EFAULT (Invalid data address given)
+ * - -EALREADY (socket already bound)
+ * - -EINVAL (@a optlen is invalid or *@a optval is zero)
+ * .
+ *
+ * @par Calling context:
+ * RT/non-RT
+ */
+#define BUFP_BUFSZ		2
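+
+/*
+ * Sketch (illustrative size, error handling omitted): a BUFP socket
+ * must be given a buffer size before it can be bound.
+ *
+ *   size_t bufsz = 16384;
+ *   int s = socket(AF_RTIPC, SOCK_DGRAM, IPCPROTO_BUFP);
+ *   setsockopt(s, SOL_BUFP, BUFP_BUFSZ, &bufsz, sizeof(bufsz));
+ *   ... then bind() to a port as shown earlier ...
+ */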
+/** @} */
+
+/**
+ * @anchor sockopts_socket @name Socket level options
+ * Setting and getting supported standard socket level options.
+ * @{ */
+/**
+ *
+ * @ref IPCPROTO_IDDP and @ref IPCPROTO_BUFP protocols support the
+ * standard SO_SNDTIMEO socket option, from the @c SOL_SOCKET level.
+ *
+ * @see @c setsockopt(), @c getsockopt() in IEEE Std 1003.1,
+ * http://www.opengroup.org/onlinepubs/009695399/
+ */
+#ifdef DOXYGEN_CPP
+#define SO_SNDTIMEO defined_by_kernel_header_file
+#endif
+/**
+ *
+ * All RTIPC protocols support the standard SO_RCVTIMEO socket option,
+ * from the @c SOL_SOCKET level.
+ *
+ * @see @c setsockopt(), @c getsockopt() in IEEE Std 1003.1,
+ * http://www.opengroup.org/onlinepubs/009695399/
+ */
+#ifdef DOXYGEN_CPP
+#define SO_RCVTIMEO defined_by_kernel_header_file
+#endif
+/** @} */
+
+/**
+ * @anchor rtdm_ipc_examples @name RTIPC examples
+ * @{ */
+/** @example bufp-readwrite.c */
+/** @example bufp-label.c */
+/** @example iddp-label.c */
+/** @example iddp-sendrecv.c */
+/** @example xddp-echo.c */
+/** @example xddp-label.c */
+/** @example xddp-stream.c */
+/** @} */
+
+/** @} */
+
+#endif /* !_RTDM_UAPI_IPC_H */
+++ linux-patched/include/xenomai/rtdm/uapi/udd.h	2022-03-21 12:58:32.291860663 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/include/xenomai/rtdm/uapi/testing.h	1970-01-01 01:00:00.000000000 +0100
+/**
+ * @file
+ * This file is part of the Xenomai project.
+ *
+ * @author Copyright (C) 2014 Philippe Gerum <rpm@xenomai.org>
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA.
+ */
+#ifndef _RTDM_UAPI_UDD_H
+#define _RTDM_UAPI_UDD_H
+
+/**
+ * @addtogroup rtdm_udd
+ *
+ * @{
+ */
+
+/**
+ * @anchor udd_signotify
+ * @brief UDD event notification descriptor
+ *
+ * This structure shall be used to pass the information required to
+ * enable/disable the notification by signal upon interrupt receipt.
+ *
+ * If PID is zero or negative, the notification is disabled.
+ * Otherwise, the Cobalt thread whose PID is given will receive the
+ * Cobalt signal also mentioned, along with the count of interrupts at
+ * the time of the receipt stored in siginfo.si_int. A Cobalt thread
+ * must explicitly wait for notifications using the sigwaitinfo() or
+ * sigtimedwait() services (no asynchronous mode available).
+ */
+struct udd_signotify {
+	/**
+	 * PID of the Cobalt thread to notify upon interrupt
+	 * receipt. If @a pid is zero or negative, the notification is
+	 * disabled.
+	 */
+	pid_t pid;
+	/**
+	 * Signal number to send to PID for notifying, which must be
+	 * in the range [SIGRTMIN .. SIGRTMAX] inclusive. This value
+	 * is not considered if @a pid is zero or negative.
+	 */
+	int sig;
+};
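+
+/*
+ * Sketch (target_pid and fd are placeholders; fd refers to an open
+ * UDD device descriptor): request SIGRTMIN+1 to be sent to the given
+ * Cobalt thread upon each interrupt receipt, via UDD_RTIOC_IRQSIG
+ * defined below.
+ *
+ *   struct udd_signotify sn = {
+ *           .pid = target_pid,
+ *           .sig = SIGRTMIN + 1,
+ *   };
+ *   ioctl(fd, UDD_RTIOC_IRQSIG, &sn);
+ */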
+
+/**
+ * @anchor udd_ioctl_codes @name UDD_IOCTL
+ * IOCTL requests
+ *
+ * @{
+ */
+
+/**
+ * Enable the interrupt line. The UDD-class mini-driver should handle
+ * this request when received through its ->ioctl() handler if
+ * provided. Otherwise, the UDD core enables the interrupt line in the
+ * interrupt controller before returning to the caller.
+ */
+#define UDD_RTIOC_IRQEN		_IO(RTDM_CLASS_UDD, 0)
+/**
+ * Disable the interrupt line. The UDD-class mini-driver should handle
+ * this request when received through its ->ioctl() handler if
+ * provided. Otherwise, the UDD core disables the interrupt line in
+ * the interrupt controller before returning to the caller.
+ *
+ * @note The mini-driver must handle the UDD_RTIOC_IRQEN request for a
+ * custom IRQ from its ->ioctl() handler, otherwise such request
+ * receives -EIO from the UDD core.
+ */
+#define UDD_RTIOC_IRQDIS	_IO(RTDM_CLASS_UDD, 1)
+/**
+ * Enable/Disable signal notification upon interrupt event. A valid
+ * @ref udd_signotify "notification descriptor" must be passed along
+ * with this request, which is handled by the UDD core directly.
+ *
+ * @note The mini-driver must handle the UDD_RTIOC_IRQDIS request for
+ * a custom IRQ from its ->ioctl() handler, otherwise such request
+ * receives -EIO from the UDD core.
+ */
+#define UDD_RTIOC_IRQSIG	_IOW(RTDM_CLASS_UDD, 2, struct udd_signotify)
+
+/** @} */
+/** @} */
+
+#endif /* !_RTDM_UAPI_UDD_H */
+++ linux-patched/include/xenomai/rtdm/uapi/testing.h	2022-03-21 12:58:32.284860731 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/include/xenomai/rtdm/uapi/analogy.h	1970-01-01 01:00:00.000000000 +0100
+/**
+ * @file
+ * Real-Time Driver Model for Xenomai, testing device profile header
+ *
+ * @note Copyright (C) 2005 Jan Kiszka <jan.kiszka@web.de>
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA.
+ *
+ * @ingroup rttesting
+ */
+#ifndef _RTDM_UAPI_TESTING_H
+#define _RTDM_UAPI_TESTING_H
+
+#include <linux/types.h>
+
+#define RTTST_PROFILE_VER		2
+
+typedef struct rttst_bench_res {
+	__s32 avg;
+	__s32 min;
+	__s32 max;
+	__s32 overruns;
+	__s32 test_loops;
+} rttst_bench_res_t;
+
+typedef struct rttst_interm_bench_res {
+	struct rttst_bench_res last;
+	struct rttst_bench_res overall;
+} rttst_interm_bench_res_t;
+
+typedef struct rttst_overall_bench_res {
+	struct rttst_bench_res result;
+	__s32 *histogram_avg;
+	__s32 *histogram_min;
+	__s32 *histogram_max;
+} rttst_overall_bench_res_t;
+
+#define RTTST_TMBENCH_INVALID		-1 /* internal use only */
+#define RTTST_TMBENCH_TASK		0
+#define RTTST_TMBENCH_HANDLER		1
+
+typedef struct rttst_tmbench_config {
+	int mode;
+	int priority;
+	__u64 period;
+	int warmup_loops;
+	int histogram_size;
+	int histogram_bucketsize;
+	int freeze_max;
+} rttst_tmbench_config_t;
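+
+/*
+ * Editor's sketch (not part of the original profile): starting a
+ * short timer benchmark and polling intermediate results. The device
+ * node name "/dev/rtdm/timerbench" and the <rtdm/testing.h> userland
+ * include are assumptions.
+ *
+ * @code
+ * #include <fcntl.h>
+ * #include <stdio.h>
+ * #include <unistd.h>
+ * #include <sys/ioctl.h>
+ * #include <rtdm/testing.h>
+ *
+ * int run_short_bench(void)
+ * {
+ *	struct rttst_tmbench_config cfg = {
+ *		.mode = RTTST_TMBENCH_TASK,	// sample from a periodic task
+ *		.priority = 99,
+ *		.period = 100000,		// 100 us, in nanoseconds
+ *		.warmup_loops = 10,
+ *		.histogram_size = 0,		// no histogram
+ *		.histogram_bucketsize = 0,
+ *		.freeze_max = 0,
+ *	};
+ *	struct rttst_interm_bench_res res;
+ *	struct rttst_overall_bench_res overall = { 0 };
+ *	int fd, i;
+ *
+ *	fd = open("/dev/rtdm/timerbench", O_RDWR);
+ *	if (fd < 0)
+ *		return -1;
+ *
+ *	if (ioctl(fd, RTTST_RTIOC_TMBENCH_START, &cfg))
+ *		goto out;
+ *
+ *	for (i = 0; i < 5; i++) {
+ *		if (ioctl(fd, RTTST_RTIOC_INTERM_BENCH_RES, &res))
+ *			break;
+ *		printf("min %d avg %d max %d (ns)\n",
+ *		       res.last.min, res.last.avg, res.last.max);
+ *	}
+ *
+ *	ioctl(fd, RTTST_RTIOC_TMBENCH_STOP, &overall);
+ * out:
+ *	close(fd);
+ *	return 0;
+ * }
+ * @endcode
+ */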
+
+struct rttst_swtest_task {
+	unsigned int index;
+	unsigned int flags;
+};
+
+/* Possible values for struct rttst_swtest_task::flags. */
+#define RTTST_SWTEST_FPU		0x1
+#define RTTST_SWTEST_USE_FPU		0x2 /* Only for kernel-space tasks. */
+#define RTTST_SWTEST_FREEZE		0x4 /* Only for kernel-space tasks. */
+
+struct rttst_swtest_dir {
+	unsigned int from;
+	unsigned int to;
+};
+
+struct rttst_swtest_error {
+	struct rttst_swtest_dir last_switch;
+	unsigned int fp_val;
+};
+
+#define RTTST_RTDM_NORMAL_CLOSE		0
+#define RTTST_RTDM_DEFER_CLOSE_CONTEXT	1
+
+#define RTTST_RTDM_MAGIC_PRIMARY	0xfefbfefb
+#define RTTST_RTDM_MAGIC_SECONDARY	0xa5b9a5b9
+
+#define RTTST_HEAPCHECK_ZEROOVRD   1
+#define RTTST_HEAPCHECK_SHUFFLE    2
+#define RTTST_HEAPCHECK_PATTERN    4
+#define RTTST_HEAPCHECK_HOT        8
+
+struct rttst_heap_parms {
+	__u64 heap_size;
+	__u64 block_size;
+	int flags;
+	int nrstats;
+};
+
+struct rttst_heap_stats {
+	__u64 heap_size;
+	__u64 user_size;
+	__u64 block_size;
+	__s64 alloc_avg_ns;
+	__s64 alloc_max_ns;
+	__s64 free_avg_ns;
+	__s64 free_max_ns;
+	__u64 maximum_free;
+	__u64 largest_free;
+	int nrblocks;
+	int flags;
+};
+
+struct rttst_heap_stathdr {
+	int nrstats;
+	struct rttst_heap_stats *buf;
+};
+
+#define RTIOC_TYPE_TESTING		RTDM_CLASS_TESTING
+
+/*!
+ * @name Sub-Classes of RTDM_CLASS_TESTING
+ * @{ */
+/** subclass name: "timerbench" */
+#define RTDM_SUBCLASS_TIMERBENCH	0
+/** subclass name: "irqbench" */
+#define RTDM_SUBCLASS_IRQBENCH		1
+/** subclass name: "switchtest" */
+#define RTDM_SUBCLASS_SWITCHTEST	2
+/** subclase name: "rtdm" */
+#define RTDM_SUBCLASS_RTDMTEST		3
+/** subclase name: "heapcheck" */
+#define RTDM_SUBCLASS_HEAPCHECK		4
+/** @} */
+
+/*!
+ * @anchor TSTIOCTLs @name IOCTLs
+ * Testing device IOCTLs
+ * @{ */
+#define RTTST_RTIOC_INTERM_BENCH_RES \
+	_IOWR(RTIOC_TYPE_TESTING, 0x00, struct rttst_interm_bench_res)
+
+#define RTTST_RTIOC_TMBENCH_START \
+	_IOW(RTIOC_TYPE_TESTING, 0x10, struct rttst_tmbench_config)
+
+#define RTTST_RTIOC_TMBENCH_STOP \
+	_IOWR(RTIOC_TYPE_TESTING, 0x11, struct rttst_overall_bench_res)
+
+#define RTTST_RTIOC_SWTEST_SET_TASKS_COUNT \
+	_IOW(RTIOC_TYPE_TESTING, 0x30, __u32)
+
+#define RTTST_RTIOC_SWTEST_SET_CPU \
+	_IOW(RTIOC_TYPE_TESTING, 0x31, __u32)
+
+#define RTTST_RTIOC_SWTEST_REGISTER_UTASK \
+	_IOW(RTIOC_TYPE_TESTING, 0x32, struct rttst_swtest_task)
+
+#define RTTST_RTIOC_SWTEST_CREATE_KTASK \
+	_IOWR(RTIOC_TYPE_TESTING, 0x33, struct rttst_swtest_task)
+
+#define RTTST_RTIOC_SWTEST_PEND \
+	_IOR(RTIOC_TYPE_TESTING, 0x34, struct rttst_swtest_task)
+
+#define RTTST_RTIOC_SWTEST_SWITCH_TO \
+	_IOR(RTIOC_TYPE_TESTING, 0x35, struct rttst_swtest_dir)
+
+#define RTTST_RTIOC_SWTEST_GET_SWITCHES_COUNT \
+	_IOR(RTIOC_TYPE_TESTING, 0x36, __u32)
+
+#define RTTST_RTIOC_SWTEST_GET_LAST_ERROR \
+	_IOR(RTIOC_TYPE_TESTING, 0x37, struct rttst_swtest_error)
+
+#define RTTST_RTIOC_SWTEST_SET_PAUSE \
+	_IOW(RTIOC_TYPE_TESTING, 0x38, __u32)
+
+#define RTTST_RTIOC_RTDM_DEFER_CLOSE \
+	_IOW(RTIOC_TYPE_TESTING, 0x40, __u32)
+
+#define RTTST_RTIOC_RTDM_ACTOR_GET_CPU \
+	_IOR(RTIOC_TYPE_TESTING, 0x41, __u32)
+
+#define RTTST_RTIOC_RTDM_PING_PRIMARY \
+	_IOR(RTIOC_TYPE_TESTING, 0x42, __u32)
+
+#define RTTST_RTIOC_RTDM_PING_SECONDARY \
+	_IOR(RTIOC_TYPE_TESTING, 0x43, __u32)
+
+#define RTTST_RTIOC_HEAP_CHECK \
+	_IOR(RTIOC_TYPE_TESTING, 0x44, struct rttst_heap_parms)
+
+#define RTTST_RTIOC_HEAP_STAT_COLLECT \
+	_IOR(RTIOC_TYPE_TESTING, 0x45, int)
+
+/** @} */
+
+#endif /* !_RTDM_UAPI_TESTING_H */
+++ linux-patched/include/xenomai/rtdm/uapi/analogy.h	2022-03-21 12:58:32.276860809 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/include/xenomai/rtdm/uapi/gpio.h	1970-01-01 01:00:00.000000000 +0100
+/**
+ * @file
+ * Analogy for Linux, UAPI bits
+ * @note Copyright (C) 1997-2000 David A. Schleef <ds@schleef.org>
+ * @note Copyright (C) 2008 Alexis Berlemont <alexis.berlemont@free.fr>
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ * Lesser General Public License for more details.
+
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA.
+ */
+#ifndef _RTDM_UAPI_ANALOGY_H
+#define _RTDM_UAPI_ANALOGY_H
+
+/* --- Misc precompilation constant --- */
+#define A4L_NAMELEN 20
+
+#define A4L_INFINITE 0
+#define A4L_NONBLOCK (-1)
+
+/* --- Common Analogy types --- */
+
+typedef unsigned short sampl_t;
+typedef unsigned long lsampl_t;
+
+/* MMAP ioctl argument structure */
+struct a4l_mmap_arg {
+	unsigned int idx_subd;
+	unsigned long size;
+	void *ptr;
+};
+typedef struct a4l_mmap_arg a4l_mmap_t;
+
+/* Constants related to buffer size
+   (might be used with BUFCFG ioctl) */
+#define A4L_BUF_MAXSIZE 0x1000000
+#define A4L_BUF_DEFSIZE 0x10000
+#define A4L_BUF_DEFMAGIC 0xffaaff55
+
+/* BUFCFG ioctl argument structure */
+struct a4l_buffer_config {
+	/* NOTE: with the current buffer implementation, the idx_subd
+	   field has become useless; buffers are now per-context, so
+	   the buffer size configuration is specific to an open device.
+	   There is one exception: a default buffer size can be defined
+	   per device, and so far a workaround is used to implement the
+	   configuration of that default size. */
+	unsigned int idx_subd;
+	unsigned long buf_size;
+};
+typedef struct a4l_buffer_config a4l_bufcfg_t;
+
+/* BUFINFO ioctl argument structure */
+struct a4l_buffer_info {
+	unsigned int idx_subd;
+	unsigned long buf_size;
+	unsigned long rw_count;
+};
+typedef struct a4l_buffer_info a4l_bufinfo_t;
+
+/* BUFCFG2 / BUFINFO2 ioctl argument structure */
+struct a4l_buffer_config2 {
+	unsigned long wake_count;
+	unsigned long reserved[3];
+};
+typedef struct a4l_buffer_config2 a4l_bufcfg2_t;
+
+/* POLL ioctl argument structure */
+struct a4l_poll {
+	unsigned int idx_subd;
+	unsigned long arg;
+};
+typedef struct a4l_poll a4l_poll_t;
+
+/* DEVCFG ioctl argument structure */
+struct a4l_link_desc {
+	unsigned char bname_size;
+	char *bname;
+	unsigned int opts_size;
+	void *opts;
+};
+typedef struct a4l_link_desc a4l_lnkdesc_t;
+
+/* DEVINFO ioctl argument structure */
+struct a4l_dev_info {
+	char board_name[A4L_NAMELEN];
+	char driver_name[A4L_NAMELEN];
+	int nb_subd;
+	int idx_read_subd;
+	int idx_write_subd;
+};
+typedef struct a4l_dev_info a4l_dvinfo_t;
+
+#define CIO 'd'
+#define A4L_DEVCFG _IOW(CIO,0,a4l_lnkdesc_t)
+#define A4L_DEVINFO _IOR(CIO,1,a4l_dvinfo_t)
+#define A4L_SUBDINFO _IOR(CIO,2,a4l_sbinfo_t)
+#define A4L_CHANINFO _IOR(CIO,3,a4l_chinfo_arg_t)
+#define A4L_RNGINFO _IOR(CIO,4,a4l_rnginfo_arg_t)
+#define A4L_CMD _IOWR(CIO,5,a4l_cmd_t)
+#define A4L_CANCEL _IOR(CIO,6,unsigned int)
+#define A4L_INSNLIST _IOR(CIO,7,unsigned int)
+#define A4L_INSN _IOR(CIO,8,unsigned int)
+#define A4L_BUFCFG _IOR(CIO,9,a4l_bufcfg_t)
+#define A4L_BUFINFO _IOWR(CIO,10,a4l_bufinfo_t)
+#define A4L_POLL _IOR(CIO,11,unsigned int)
+#define A4L_MMAP _IOWR(CIO,12,unsigned int)
+#define A4L_NBCHANINFO _IOR(CIO,13,a4l_chinfo_arg_t)
+#define A4L_NBRNGINFO _IOR(CIO,14,a4l_rnginfo_arg_t)
+
+/* These IOCTLs are bound to be merged with A4L_BUFCFG and A4L_BUFINFO
+   at the next major release */
+#define A4L_BUFCFG2 _IOR(CIO,15,a4l_bufcfg_t)
+#define A4L_BUFINFO2 _IOWR(CIO,16,a4l_bufcfg_t)
+
+/*!
+ * @addtogroup analogy_lib_async1
+ * @{
+ */
+
+/*!
+ * @anchor ANALOGY_CMD_xxx @name ANALOGY_CMD_xxx
+ * @brief Common command flags definitions
+ * @{
+ */
+
+/**
+ * Do not execute the command, just check it
+ */
+#define A4L_CMD_SIMUL 0x1
+/**
+ * Perform data recovery / transmission in bulk mode
+ */
+#define A4L_CMD_BULK 0x2
+/**
+ * Perform a command which will write data to the device
+ */
+#define A4L_CMD_WRITE 0x4
+
+	  /*! @} ANALOGY_CMD_xxx */
+
+/*!
+ * @anchor TRIG_xxx @name TRIG_xxx
+ * @brief Command triggers flags definitions
+ * @{
+ */
+
+/**
+ * Never trigger
+ */
+#define TRIG_NONE	0x00000001
+/**
+ * Trigger now + N ns
+ */
+#define TRIG_NOW	0x00000002
+/**
+ * Trigger on next lower level trig
+ */
+#define TRIG_FOLLOW	0x00000004
+/**
+ * Trigger at time N ns
+ */
+#define TRIG_TIME	0x00000008
+/**
+ * Trigger at rate N ns
+ */
+#define TRIG_TIMER	0x00000010
+/**
+ * Trigger when count reaches N
+ */
+#define TRIG_COUNT	0x00000020
+/**
+ * Trigger on external signal N
+ */
+#define TRIG_EXT	0x00000040
+/**
+ * Trigger on analogy-internal signal N
+ */
+#define TRIG_INT	0x00000080
+/**
+ * Driver defined trigger
+ */
+#define TRIG_OTHER	0x00000100
+/**
+ * Wake up on end-of-scan
+ */
+#define TRIG_WAKE_EOS	0x0020
+/**
+ * Trigger not implemented yet
+ */
+#define TRIG_ROUND_MASK 0x00030000
+/**
+ * Trigger not implemented yet
+ */
+#define TRIG_ROUND_NEAREST 0x00000000
+/**
+ * Trigger not implemented yet
+ */
+#define TRIG_ROUND_DOWN 0x00010000
+/**
+ * Trigger not implemented yet
+ */
+#define TRIG_ROUND_UP 0x00020000
+/**
+ * Trigger not implemented yet
+ */
+#define TRIG_ROUND_UP_NEXT 0x00030000
+
+	  /*! @} TRIG_xxx */
+
+/*!
+ * @anchor CHAN_RNG_AREF @name Channel macros
+ * @brief Specific precompilation macros and constants useful for the
+ * channel descriptors array located in the command structure
+ * @{
+ */
+
+/**
+ * Channel indication macro
+ */
+#define CHAN(a) ((a) & 0xffff)
+/**
+ * Range definition macro
+ */
+#define RNG(a) (((a) & 0xff) << 16)
+/**
+ * Reference definition macro
+ */
+#define AREF(a) (((a) & 0x03) << 24)
+/**
+ * Flags definition macro
+ */
+#define FLAGS(a) ((a) & CR_FLAGS_MASK)
+/**
+ * Channel + range + reference definition macro
+ */
+#define PACK(a, b, c) (a | RNG(b) | AREF(c))
+/**
+ * Channel + range + reference + flags definition macro
+ */
+#define PACK_FLAGS(a, b, c, d) (PACK(a, b, c) | FLAGS(d))
+
+/**
+ * Analog reference is analog ground
+ */
+#define AREF_GROUND 0x00
+/**
+ * Analog reference is analog common
+ */
+#define AREF_COMMON 0x01
+/**
+ * Analog reference is differential
+ */
+#define AREF_DIFF 0x02
+/**
+ * Analog reference is undefined
+ */
+#define AREF_OTHER 0x03
+
+	  /*! @} CHAN_RNG_AREF */
+
+#if !defined(DOXYGEN_CPP)
+
+#define CR_FLAGS_MASK 0xfc000000
+#define CR_ALT_FILTER (1<<26)
+#define CR_DITHER CR_ALT_FILTER
+#define CR_DEGLITCH CR_ALT_FILTER
+#define CR_ALT_SOURCE (1<<27)
+#define CR_EDGE	(1<<30)
+#define CR_INVERT (1<<31)
+
+#endif /* !DOXYGEN_CPP */
+
+/*!
+ * @brief Structure describing the asynchronous instruction
+ * @see a4l_snd_command()
+ */
+
+struct a4l_cmd_desc {
+	unsigned char idx_subd;
+			       /**< Subdevice to which the command will be applied. */
+
+	unsigned long flags;
+			       /**< Command flags */
+
+	/* Command trigger characteristics */
+	unsigned int start_src;
+			       /**< Start trigger type */
+	unsigned int start_arg;
+			       /**< Start trigger argument */
+	unsigned int scan_begin_src;
+			       /**< Scan begin trigger type */
+	unsigned int scan_begin_arg;
+			       /**< Scan begin trigger argument */
+	unsigned int convert_src;
+			       /**< Convert trigger type */
+	unsigned int convert_arg;
+			       /**< Convert trigger argument */
+	unsigned int scan_end_src;
+			       /**< Scan end trigger type */
+	unsigned int scan_end_arg;
+			       /**< Scan end trigger argument */
+	unsigned int stop_src;
+			       /**< Stop trigger type */
+	unsigned int stop_arg;
+			   /**< Stop trigger argument */
+
+	unsigned char nb_chan;
+			   /**< Count of channels related to the command */
+	unsigned int *chan_descs;
+			    /**< Array containing the channel descriptors */
+
+	/* Driver specific fields */
+	unsigned int valid_simul_stages;
+			   /**< cmd simulation valid stages (driver dependent) */
+
+	unsigned int data_len;
+			   /**< Driver specific buffer size */
+	sampl_t *data;
+	                   /**< Driver specific buffer pointer */
+};
+typedef struct a4l_cmd_desc a4l_cmd_t;
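+
+/*
+ * Editor's sketch (not part of the original header): building an
+ * asynchronous acquisition command with the channel descriptor
+ * macros above and submitting it through the A4L_CMD request. The
+ * subdevice index, the timing values and the <rtdm/analogy.h>
+ * userland include are assumptions; real code would typically go
+ * through the a4l_snd_command() helper mentioned above.
+ *
+ * @code
+ * #include <sys/ioctl.h>
+ * #include <rtdm/analogy.h>
+ *
+ * #define NB_CHAN 2
+ *
+ * int start_acquisition(int fd)
+ * {
+ *	unsigned int chans[NB_CHAN] = {
+ *		PACK(0, 0, AREF_GROUND),	// channel 0, range 0
+ *		PACK(1, 0, AREF_GROUND),	// channel 1, range 0
+ *	};
+ *	struct a4l_cmd_desc cmd = {
+ *		.idx_subd = 0,			// analog input subdevice (assumed)
+ *		.start_src = TRIG_NOW,
+ *		.start_arg = 0,
+ *		.scan_begin_src = TRIG_TIMER,
+ *		.scan_begin_arg = 1000000,	// one scan every 1 ms
+ *		.convert_src = TRIG_TIMER,
+ *		.convert_arg = 100000,		// 100 us between channels
+ *		.scan_end_src = TRIG_COUNT,
+ *		.scan_end_arg = NB_CHAN,
+ *		.stop_src = TRIG_NONE,		// free-running until A4L_CANCEL
+ *		.stop_arg = 0,
+ *		.nb_chan = NB_CHAN,
+ *		.chan_descs = chans,
+ *	};
+ *
+ *	// samples are then fetched with read() on the same descriptor
+ *	return ioctl(fd, A4L_CMD, &cmd);
+ * }
+ * @endcode
+ */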
+
+/*! @} analogy_lib_async1 */
+
+/* --- Range section --- */
+
+/** Constant for internal use only (must not be used by driver
+    developer).  */
+#define A4L_RNG_FACTOR 1000000
+
+/**
+ * Volt unit range flag
+ */
+#define A4L_RNG_VOLT_UNIT 0x0
+/**
+ * MilliAmpere unit range flag
+ */
+#define A4L_RNG_MAMP_UNIT 0x1
+/**
+ * No unit range flag
+ */
+#define A4L_RNG_NO_UNIT 0x2
+/**
+ * External unit range flag
+ */
+#define A4L_RNG_EXT_UNIT 0x4
+
+/**
+ * Macro to retrieve the range unit from the range flags
+ */
+#define A4L_RNG_UNIT(x) (x & (A4L_RNG_VOLT_UNIT |	\
+			      A4L_RNG_MAMP_UNIT |	\
+			      A4L_RNG_NO_UNIT |		\
+			      A4L_RNG_EXT_UNIT))
+
+/* --- Subdevice flags desc stuff --- */
+
+/* TODO: replace ANALOGY_SUBD_AI with ANALOGY_SUBD_ANALOG
+   and ANALOGY_SUBD_INPUT */
+
+/* Subdevice types masks */
+#define A4L_SUBD_MASK_READ 0x80000000
+#define A4L_SUBD_MASK_WRITE 0x40000000
+#define A4L_SUBD_MASK_SPECIAL 0x20000000
+
+/*!
+ * @addtogroup analogy_subdevice
+ * @{
+ */
+
+/*!
+ * @anchor ANALOGY_SUBD_xxx @name Subdevices types
+ * @brief Flags to define the subdevice type
+ * @{
+ */
+
+/**
+ * Unused subdevice
+ */
+#define A4L_SUBD_UNUSED (A4L_SUBD_MASK_SPECIAL|0x1)
+/**
+ * Analog input subdevice
+ */
+#define A4L_SUBD_AI (A4L_SUBD_MASK_READ|0x2)
+/**
+ * Analog output subdevice
+ */
+#define A4L_SUBD_AO (A4L_SUBD_MASK_WRITE|0x4)
+/**
+ * Digital input subdevice
+ */
+#define A4L_SUBD_DI (A4L_SUBD_MASK_READ|0x8)
+/**
+ * Digital output subdevice
+ */
+#define A4L_SUBD_DO (A4L_SUBD_MASK_WRITE|0x10)
+/**
+ * Digital input/output subdevice
+ */
+#define A4L_SUBD_DIO (A4L_SUBD_MASK_SPECIAL|0x20)
+/**
+ * Counter subdevice
+ */
+#define A4L_SUBD_COUNTER (A4L_SUBD_MASK_SPECIAL|0x40)
+/**
+ * Timer subdevice
+ */
+#define A4L_SUBD_TIMER (A4L_SUBD_MASK_SPECIAL|0x80)
+/**
+ * Memory, EEPROM, DPRAM
+ */
+#define A4L_SUBD_MEMORY (A4L_SUBD_MASK_SPECIAL|0x100)
+/**
+ * Calibration subdevice  DACs
+ */
+#define A4L_SUBD_CALIB (A4L_SUBD_MASK_SPECIAL|0x200)
+/**
+ * Processor, DSP
+ */
+#define A4L_SUBD_PROC (A4L_SUBD_MASK_SPECIAL|0x400)
+/**
+ * Serial IO subdevice
+ */
+#define A4L_SUBD_SERIAL (A4L_SUBD_MASK_SPECIAL|0x800)
+/**
+ * Mask which gathers all the types
+ */
+#define A4L_SUBD_TYPES (A4L_SUBD_UNUSED |	 \
+			   A4L_SUBD_AI |	 \
+			   A4L_SUBD_AO |	 \
+			   A4L_SUBD_DI |	 \
+			   A4L_SUBD_DO |	 \
+			   A4L_SUBD_DIO |	 \
+			   A4L_SUBD_COUNTER | \
+			   A4L_SUBD_TIMER |	 \
+			   A4L_SUBD_MEMORY |	 \
+			   A4L_SUBD_CALIB |	 \
+			   A4L_SUBD_PROC |	 \
+			   A4L_SUBD_SERIAL)
+
+/*! @} ANALOGY_SUBD_xxx */
+
+/*!
+ * @anchor ANALOGY_SUBD_FT_xxx @name Subdevice features
+ * @brief Flags to define the subdevice's capabilities
+ * @{
+ */
+
+/* Subdevice capabilities */
+/**
+ * The subdevice can handle commands (i.e. it can perform asynchronous
+ * acquisition)
+ */
+#define A4L_SUBD_CMD 0x1000
+/**
+ * The subdevice supports mmap operations (technically, any driver can
+ * do it; however, the developer might want his driver to be accessed
+ * only through read / write)
+ */
+#define A4L_SUBD_MMAP 0x8000
+
+/*! @} ANALOGY_SUBD_FT_xxx */
+
+/*!
+ * @anchor ANALOGY_SUBD_ST_xxx @name Subdevice status
+ * @brief Flags to define the subdevice's status
+ * @{
+ */
+
+/* Subdevice status flag(s) */
+/**
+ * The subdevice is busy, a synchronous or an asynchronous acquisition
+ * is occurring
+ */
+#define A4L_SUBD_BUSY_NR 0
+#define A4L_SUBD_BUSY (1 << A4L_SUBD_BUSY_NR)
+
+/**
+ * The subdevice is about to be cleaned in the middle of the detach
+ * procedure
+ */
+#define A4L_SUBD_CLEAN_NR 1
+#define A4L_SUBD_CLEAN (1 << A4L_SUBD_CLEAN_NR)
+
+
+/*! @} ANALOGY_SUBD_ST_xxx */
+
+/* --- Subdevice related IOCTL arguments structures --- */
+
+/* SUBDINFO IOCTL argument */
+struct a4l_subd_info {
+	unsigned long flags;
+	unsigned long status;
+	unsigned char nb_chan;
+};
+typedef struct a4l_subd_info a4l_sbinfo_t;
+
+/* CHANINFO / NBCHANINFO IOCTL arguments */
+struct a4l_chan_info {
+	unsigned long chan_flags;
+	unsigned char nb_rng;
+	unsigned char nb_bits;
+};
+typedef struct a4l_chan_info a4l_chinfo_t;
+
+struct a4l_chinfo_arg {
+	unsigned int idx_subd;
+	void *info;
+};
+typedef struct a4l_chinfo_arg a4l_chinfo_arg_t;
+
+/* RNGINFO / NBRNGINFO IOCTL arguments */
+struct a4l_rng_info {
+	long min;
+	long max;
+	unsigned long flags;
+};
+typedef struct a4l_rng_info a4l_rnginfo_t;
+
+struct a4l_rng_info_arg {
+	unsigned int idx_subd;
+	unsigned int idx_chan;
+	void *info;
+};
+typedef struct a4l_rng_info_arg a4l_rnginfo_arg_t;
+
+/*! @} */
+
+#define A4L_INSN_MASK_READ 0x8000000
+#define A4L_INSN_MASK_WRITE 0x4000000
+#define A4L_INSN_MASK_SPECIAL 0x2000000
+
+/*!
+ * @addtogroup analogy_lib_sync1
+ * @{
+ */
+
+/*!
+ * @anchor ANALOGY_INSN_xxx @name Instruction type
+ * @brief Flags to define the type of instruction
+ * @{
+ */
+
+/**
+ * Read instruction
+ */
+#define A4L_INSN_READ (0 | A4L_INSN_MASK_READ)
+/**
+ * Write instruction
+ */
+#define A4L_INSN_WRITE (1 | A4L_INSN_MASK_WRITE)
+/**
+ * "Bits" instruction
+ */
+#define A4L_INSN_BITS (2 | A4L_INSN_MASK_READ | \
+		       A4L_INSN_MASK_WRITE)
+/**
+ * Configuration instruction
+ */
+#define A4L_INSN_CONFIG (3 | A4L_INSN_MASK_READ | \
+			 A4L_INSN_MASK_WRITE)
+/**
+ * Get time instruction
+ */
+#define A4L_INSN_GTOD (4 | A4L_INSN_MASK_READ | \
+		       A4L_INSN_MASK_SPECIAL)
+/**
+ * Wait instruction
+ */
+#define A4L_INSN_WAIT (5 | A4L_INSN_MASK_WRITE | \
+		       A4L_INSN_MASK_SPECIAL)
+/**
+ * Trigger instruction (to start asynchronous acquisition)
+ */
+#define A4L_INSN_INTTRIG (6 | A4L_INSN_MASK_WRITE | \
+			  A4L_INSN_MASK_SPECIAL)
+
+	  /*! @} ANALOGY_INSN_xxx */
+
+/**
+ * Maximal wait duration
+ */
+#define A4L_INSN_WAIT_MAX 100000
+
+/*!
+ * @anchor INSN_CONFIG_xxx @name Configuration instruction type
+ * @brief Values to define the type of configuration instruction
+ * @{
+ */
+
+#define A4L_INSN_CONFIG_DIO_INPUT		0
+#define A4L_INSN_CONFIG_DIO_OUTPUT		1
+#define A4L_INSN_CONFIG_DIO_OPENDRAIN		2
+#define A4L_INSN_CONFIG_ANALOG_TRIG		16
+#define A4L_INSN_CONFIG_ALT_SOURCE		20
+#define A4L_INSN_CONFIG_DIGITAL_TRIG		21
+#define A4L_INSN_CONFIG_BLOCK_SIZE		22
+#define A4L_INSN_CONFIG_TIMER_1			23
+#define A4L_INSN_CONFIG_FILTER			24
+#define A4L_INSN_CONFIG_CHANGE_NOTIFY		25
+#define A4L_INSN_CONFIG_SERIAL_CLOCK		26
+#define A4L_INSN_CONFIG_BIDIRECTIONAL_DATA	27
+#define A4L_INSN_CONFIG_DIO_QUERY		28
+#define A4L_INSN_CONFIG_PWM_OUTPUT		29
+#define A4L_INSN_CONFIG_GET_PWM_OUTPUT		30
+#define A4L_INSN_CONFIG_ARM			31
+#define A4L_INSN_CONFIG_DISARM			32
+#define A4L_INSN_CONFIG_GET_COUNTER_STATUS	33
+#define A4L_INSN_CONFIG_RESET			34
+#define A4L_INSN_CONFIG_GPCT_SINGLE_PULSE_GENERATOR	1001	/* Use CTR as single pulse generator */
+#define A4L_INSN_CONFIG_GPCT_PULSE_TRAIN_GENERATOR	1002	/* Use CTR as pulse train generator */
+#define A4L_INSN_CONFIG_GPCT_QUADRATURE_ENCODER	1003	/* Use the counter as encoder */
+#define A4L_INSN_CONFIG_SET_GATE_SRC		2001	/* Set gate source */
+#define A4L_INSN_CONFIG_GET_GATE_SRC		2002	/* Get gate source */
+#define A4L_INSN_CONFIG_SET_CLOCK_SRC		2003	/* Set master clock source */
+#define A4L_INSN_CONFIG_GET_CLOCK_SRC		2004	/* Get master clock source */
+#define A4L_INSN_CONFIG_SET_OTHER_SRC		2005	/* Set other source */
+#define A4L_INSN_CONFIG_SET_COUNTER_MODE	4097
+#define A4L_INSN_CONFIG_SET_ROUTING		4099
+#define A4L_INSN_CONFIG_GET_ROUTING		4109
+
+/*! @} INSN_CONFIG_xxx */
+
+/*!
+ * @anchor ANALOGY_COUNTER_xxx @name Counter status bits
+ * @brief Status bits for INSN_CONFIG_GET_COUNTER_STATUS
+ * @{
+ */
+
+#define A4L_COUNTER_ARMED		0x1
+#define A4L_COUNTER_COUNTING		0x2
+#define A4L_COUNTER_TERMINAL_COUNT	0x4
+
+	  /*! @} ANALOGY_COUNTER_xxx */
+
+/*!
+ * @anchor ANALOGY_IO_DIRECTION @name IO direction
+ * @brief Values to define the IO polarity
+ * @{
+ */
+
+#define A4L_INPUT	0
+#define A4L_OUTPUT	1
+#define A4L_OPENDRAIN	2
+
+	  /*! @} ANALOGY_IO_DIRECTION */
+
+
+/*!
+ * @anchor ANALOGY_EV_xxx @name Events types
+ * @brief Values to define the Analogy events. They might be used to send
+ * some specific events through the instruction interface.
+ * @{
+ */
+
+#define A4L_EV_START		0x00040000
+#define A4L_EV_SCAN_BEGIN	0x00080000
+#define A4L_EV_CONVERT		0x00100000
+#define A4L_EV_SCAN_END		0x00200000
+#define A4L_EV_STOP		0x00400000
+
+/*! @} ANALOGY_EV_xxx */
+
+/*!
+ * @brief Structure describing the synchronous instruction
+ * @see a4l_snd_insn()
+ */
+
+struct a4l_instruction {
+	unsigned int type;
+		       /**< Instruction type */
+	unsigned int idx_subd;
+			   /**< Subdevice to which the instruction will be applied. */
+	unsigned int chan_desc;
+			    /**< Channel descriptor */
+	unsigned int data_size;
+			    /**< Size of the instruction data */
+	void *data;
+		    /**< Instruction data */
+};
+typedef struct a4l_instruction a4l_insn_t;
+
+/*!
+ * @brief Structure describing the list of synchronous instructions
+ * @see a4l_snd_insnlist()
+ */
+
+struct a4l_instruction_list {
+	unsigned int count;
+			/**< Instructions count */
+	a4l_insn_t *insns;
+			  /**< Tab containing the instructions pointers */
+};
+typedef struct a4l_instruction_list a4l_insnlst_t;
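+
+/*
+ * Editor's sketch (not part of the original header): a single
+ * synchronous read built from the structures above. The UAPI encodes
+ * A4L_INSN with an unsigned int size cookie, but the request is
+ * assumed here to take a pointer to struct a4l_instruction, as the
+ * a4l_snd_insn() reference suggests; the <rtdm/analogy.h> userland
+ * include is an assumption as well.
+ *
+ * @code
+ * #include <sys/ioctl.h>
+ * #include <rtdm/analogy.h>
+ *
+ * int read_one_sample(int fd, unsigned int subd, unsigned int chan,
+ *		       unsigned int *value)
+ * {
+ *	struct a4l_instruction insn = {
+ *		.type = A4L_INSN_READ,
+ *		.idx_subd = subd,
+ *		.chan_desc = PACK(chan, 0, AREF_GROUND),
+ *		.data_size = sizeof(*value),	// size in bytes
+ *		.data = value,
+ *	};
+ *
+ *	return ioctl(fd, A4L_INSN, &insn);
+ * }
+ * @endcode
+ */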
+
+/*! @} analogy_lib_sync1 */
+
+struct a4l_calibration_subdev {
+	a4l_sbinfo_t *info;
+	char *name;
+	int slen;
+	int idx;
+};
+
+struct a4l_calibration_subdev_data {
+	int index;
+	int channel;
+	int range;
+	int expansion;
+	int nb_coeff;
+	double *coeff;
+
+};
+
+struct a4l_calibration_data {
+	char *driver_name;
+	char *board_name;
+	int nb_ai;
+	struct a4l_calibration_subdev_data *ai;
+	int nb_ao;
+	struct a4l_calibration_subdev_data *ao;
+};
+
+struct a4l_polynomial {
+	int expansion;
+	int order;
+	int nb_coeff;
+	double *coeff;
+};
+
+
+#endif /* _RTDM_UAPI_ANALOGY_H */
+++ linux-patched/include/xenomai/rtdm/uapi/gpio.h	2022-03-21 12:58:32.269860877 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/include/xenomai/rtdm/uapi/serial.h	1970-01-01 01:00:00.000000000 +0100
+/**
+ * @note Copyright (C) 2016 Philippe Gerum <rpm@xenomai.org>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+#ifndef _RTDM_UAPI_GPIO_H
+#define _RTDM_UAPI_GPIO_H
+
+struct rtdm_gpio_readout {
+	nanosecs_abs_t timestamp;
+	__s32 value;
+};
+
+#define GPIO_RTIOC_DIR_OUT	_IOW(RTDM_CLASS_GPIO, 0, int)
+#define GPIO_RTIOC_DIR_IN	_IO(RTDM_CLASS_GPIO, 1)
+#define GPIO_RTIOC_IRQEN	_IOW(RTDM_CLASS_GPIO, 2, int) /* GPIO trigger */
+#define GPIO_RTIOC_IRQDIS	_IO(RTDM_CLASS_GPIO, 3)
+#define GPIO_RTIOC_REQS		_IO(RTDM_CLASS_GPIO, 4)
+#define GPIO_RTIOC_RELS		_IO(RTDM_CLASS_GPIO, 5)
+#define GPIO_RTIOC_TS_MONO	_IOR(RTDM_CLASS_GPIO, 7, int)
+#define GPIO_RTIOC_TS_REAL	_IOR(RTDM_CLASS_GPIO, 8, int)
+#define GPIO_RTIOC_TS		GPIO_RTIOC_TS_REAL
+
+#define GPIO_TRIGGER_NONE		0x0 /* unspecified */
+#define GPIO_TRIGGER_EDGE_RISING	0x1
+#define GPIO_TRIGGER_EDGE_FALLING	0x2
+#define GPIO_TRIGGER_LEVEL_HIGH		0x4
+#define GPIO_TRIGGER_LEVEL_LOW		0x8
+#define GPIO_TRIGGER_MASK		0xf
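+
+/*
+ * Editor's sketch (not part of the original profile): arming an edge
+ * interrupt on a GPIO pin and waiting for timestamped events. The
+ * pin device node name, the <rtdm/gpio.h> userland include and the
+ * assumption that each read() returns one struct rtdm_gpio_readout
+ * are editorial; adjust to the actual GPIO chip driver in use.
+ *
+ * @code
+ * #include <fcntl.h>
+ * #include <stdio.h>
+ * #include <unistd.h>
+ * #include <sys/ioctl.h>
+ * #include <rtdm/gpio.h>
+ *
+ * int watch_rising_edges(const char *devname)
+ * {
+ *	struct rtdm_gpio_readout rdo;
+ *	int fd, trigger = GPIO_TRIGGER_EDGE_RISING, n;
+ *
+ *	fd = open(devname, O_RDWR);	// e.g. "/dev/rtdm/<chip>/gpio23"
+ *	if (fd < 0)
+ *		return -1;
+ *
+ *	if (ioctl(fd, GPIO_RTIOC_IRQEN, &trigger))	// arm the interrupt
+ *		goto out;
+ *
+ *	for (n = 0; n < 10; n++) {
+ *		if (read(fd, &rdo, sizeof(rdo)) < 0)	// blocks until an edge
+ *			break;
+ *		printf("edge at %llu ns, level %d\n",
+ *		       (unsigned long long)rdo.timestamp, (int)rdo.value);
+ *	}
+ *
+ *	ioctl(fd, GPIO_RTIOC_IRQDIS);
+ * out:
+ *	close(fd);
+ *	return 0;
+ * }
+ * @endcode
+ */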
+
+#endif /* !_RTDM_UAPI_GPIO_H */
+++ linux-patched/include/xenomai/rtdm/uapi/serial.h	2022-03-21 12:58:32.262860946 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/include/xenomai/rtdm/uapi/rtdm.h	1970-01-01 01:00:00.000000000 +0100
+/**
+ * @file
+ * Real-Time Driver Model for Xenomai, serial device profile header
+ *
+ * @note Copyright (C) 2005-2007 Jan Kiszka <jan.kiszka@web.de>
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA.
+ *
+ * @ingroup rtserial
+ */
+#ifndef _RTDM_UAPI_SERIAL_H
+#define _RTDM_UAPI_SERIAL_H
+
+#define RTSER_PROFILE_VER		3
+
+/*!
+ * @anchor RTSER_DEF_BAUD   @name RTSER_DEF_BAUD
+ * Default baud rate
+ * @{ */
+#define RTSER_DEF_BAUD			9600
+/** @} */
+
+/*!
+ * @anchor RTSER_xxx_PARITY   @name RTSER_xxx_PARITY
+ * Number of parity bits
+ * @{ */
+#define RTSER_NO_PARITY			0x00
+#define RTSER_ODD_PARITY		0x01
+#define RTSER_EVEN_PARITY		0x03
+#define RTSER_DEF_PARITY		RTSER_NO_PARITY
+/** @} */
+
+/*!
+ * @anchor RTSER_xxx_BITS   @name RTSER_xxx_BITS
+ * Number of data bits
+ * @{ */
+#define RTSER_5_BITS			0x00
+#define RTSER_6_BITS			0x01
+#define RTSER_7_BITS			0x02
+#define RTSER_8_BITS			0x03
+#define RTSER_DEF_BITS			RTSER_8_BITS
+/** @} */
+
+/*!
+ * @anchor RTSER_xxx_STOPB   @name RTSER_xxx_STOPB
+ * Number of stop bits
+ * @{ */
+#define RTSER_1_STOPB			0x00
+/** valid only in combination with 5 data bits */
+#define RTSER_1_5_STOPB			0x01
+#define RTSER_2_STOPB			0x01
+#define RTSER_DEF_STOPB			RTSER_1_STOPB
+/** @} */
+
+/*!
+ * @anchor RTSER_xxx_HAND   @name RTSER_xxx_HAND
+ * Handshake mechanisms
+ * @{ */
+#define RTSER_NO_HAND			0x00
+#define RTSER_RTSCTS_HAND		0x01
+#define RTSER_DEF_HAND			RTSER_NO_HAND
+/** @} */
+
+/*!
+ * @anchor RTSER_RS485_xxx   @name RTSER_RS485_xxx
+ * RS485 mode with automatic RTS handling
+ * @{ */
+#define RTSER_RS485_DISABLE		0x00
+#define RTSER_RS485_ENABLE		0x01
+#define RTSER_DEF_RS485			RTSER_RS485_DISABLE
+/** @} */
+
+/*!
+ * @anchor RTSER_FIFO_xxx   @name RTSER_FIFO_xxx
+ * Reception FIFO interrupt threshold
+ * @{ */
+#define RTSER_FIFO_DEPTH_1		0x00
+#define RTSER_FIFO_DEPTH_4		0x40
+#define RTSER_FIFO_DEPTH_8		0x80
+#define RTSER_FIFO_DEPTH_14		0xC0
+#define RTSER_DEF_FIFO_DEPTH		RTSER_FIFO_DEPTH_1
+/** @} */
+
+/*!
+ * @anchor RTSER_TIMEOUT_xxx   @name RTSER_TIMEOUT_xxx
+ * Special timeout values, see also @ref RTDM_TIMEOUT_xxx
+ * @{ */
+#define RTSER_TIMEOUT_INFINITE		RTDM_TIMEOUT_INFINITE
+#define RTSER_TIMEOUT_NONE		RTDM_TIMEOUT_NONE
+#define RTSER_DEF_TIMEOUT		RTDM_TIMEOUT_INFINITE
+/** @} */
+
+/*!
+ * @anchor RTSER_xxx_TIMESTAMP_HISTORY   @name RTSER_xxx_TIMESTAMP_HISTORY
+ * Timestamp history control
+ * @{ */
+#define RTSER_RX_TIMESTAMP_HISTORY	0x01
+#define RTSER_DEF_TIMESTAMP_HISTORY	0x00
+/** @} */
+
+/*!
+ * @anchor RTSER_EVENT_xxx   @name RTSER_EVENT_xxx
+ * Events bits
+ * @{ */
+#define RTSER_EVENT_RXPEND		0x01
+#define RTSER_EVENT_ERRPEND		0x02
+#define RTSER_EVENT_MODEMHI		0x04
+#define RTSER_EVENT_MODEMLO		0x08
+#define RTSER_EVENT_TXEMPTY		0x10
+#define RTSER_DEF_EVENT_MASK		0x00
+/** @} */
+
+
+/*!
+ * @anchor RTSER_SET_xxx   @name RTSER_SET_xxx
+ * Configuration mask bits
+ * @{ */
+#define RTSER_SET_BAUD			0x0001
+#define RTSER_SET_PARITY		0x0002
+#define RTSER_SET_DATA_BITS		0x0004
+#define RTSER_SET_STOP_BITS		0x0008
+#define RTSER_SET_HANDSHAKE		0x0010
+#define RTSER_SET_FIFO_DEPTH		0x0020
+#define RTSER_SET_TIMEOUT_RX		0x0100
+#define RTSER_SET_TIMEOUT_TX		0x0200
+#define RTSER_SET_TIMEOUT_EVENT		0x0400
+#define RTSER_SET_TIMESTAMP_HISTORY	0x0800
+#define RTSER_SET_EVENT_MASK		0x1000
+#define RTSER_SET_RS485			0x2000
+/** @} */
+
+
+/*!
+ * @anchor RTSER_LSR_xxx   @name RTSER_LSR_xxx
+ * Line status bits
+ * @{ */
+#define RTSER_LSR_DATA			0x01
+#define RTSER_LSR_OVERRUN_ERR		0x02
+#define RTSER_LSR_PARITY_ERR		0x04
+#define RTSER_LSR_FRAMING_ERR		0x08
+#define RTSER_LSR_BREAK_IND		0x10
+#define RTSER_LSR_THR_EMTPY		0x20
+#define RTSER_LSR_TRANSM_EMPTY		0x40
+#define RTSER_LSR_FIFO_ERR		0x80
+#define RTSER_SOFT_OVERRUN_ERR		0x0100
+/** @} */
+
+
+/*!
+ * @anchor RTSER_MSR_xxx   @name RTSER_MSR_xxx
+ * Modem status bits
+ * @{ */
+#define RTSER_MSR_DCTS			0x01
+#define RTSER_MSR_DDSR			0x02
+#define RTSER_MSR_TERI			0x04
+#define RTSER_MSR_DDCD			0x08
+#define RTSER_MSR_CTS			0x10
+#define RTSER_MSR_DSR			0x20
+#define RTSER_MSR_RI			0x40
+#define RTSER_MSR_DCD			0x80
+/** @} */
+
+
+/*!
+ * @anchor RTSER_MCR_xxx   @name RTSER_MCR_xxx
+ * Modem control bits
+ * @{ */
+#define RTSER_MCR_DTR			0x01
+#define RTSER_MCR_RTS			0x02
+#define RTSER_MCR_OUT1			0x04
+#define RTSER_MCR_OUT2			0x08
+#define RTSER_MCR_LOOP			0x10
+/** @} */
+
+
+/*!
+ * @anchor RTSER_BREAK_xxx   @name RTSER_BREAK_xxx
+ * Break control
+ * @{ */
+#define RTSER_BREAK_CLR			0x00
+#define RTSER_BREAK_SET			0x01
+
+
+/**
+ * Serial device configuration
+ */
+typedef struct rtser_config {
+	/** mask specifying valid fields, see @ref RTSER_SET_xxx */
+	int		config_mask;
+
+	/** baud rate, default @ref RTSER_DEF_BAUD */
+	int		baud_rate;
+
+	/** number of parity bits, see @ref RTSER_xxx_PARITY */
+	int		parity;
+
+	/** number of data bits, see @ref RTSER_xxx_BITS */
+	int		data_bits;
+
+	/** number of stop bits, see @ref RTSER_xxx_STOPB */
+	int		stop_bits;
+
+	/** handshake mechanisms, see @ref RTSER_xxx_HAND */
+	int		handshake;
+
+	/** reception FIFO interrupt threshold, see @ref RTSER_FIFO_xxx */
+	int		fifo_depth;
+
+	int		reserved;
+
+	/** reception timeout, see @ref RTSER_TIMEOUT_xxx for special
+	 *  values */
+	nanosecs_rel_t	rx_timeout;
+
+	/** transmission timeout, see @ref RTSER_TIMEOUT_xxx for special
+	 *  values */
+	nanosecs_rel_t	tx_timeout;
+
+	/** event timeout, see @ref RTSER_TIMEOUT_xxx for special values */
+	nanosecs_rel_t	event_timeout;
+
+	/** enable timestamp history, see @ref RTSER_xxx_TIMESTAMP_HISTORY */
+	int		timestamp_history;
+
+	/** event mask to be used with @ref RTSER_RTIOC_WAIT_EVENT, see
+	 *  @ref RTSER_EVENT_xxx */
+	int		event_mask;
+
+	/** enable RS485 mode, see @ref RTSER_RS485_xxx */
+	int		rs485;
+} rtser_config_t;
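+
+/*
+ * Editor's sketch (not part of the original profile): applying a
+ * 115200 8N1 setup with a 1 s receive timeout. The device node name
+ * "/dev/rtdm/rtser0" and the <rtdm/serial.h> userland include are
+ * assumptions.
+ *
+ * @code
+ * #include <fcntl.h>
+ * #include <unistd.h>
+ * #include <sys/ioctl.h>
+ * #include <rtdm/serial.h>
+ *
+ * int open_115200_8n1(void)
+ * {
+ *	struct rtser_config cfg = {
+ *		.config_mask = RTSER_SET_BAUD | RTSER_SET_PARITY |
+ *			       RTSER_SET_DATA_BITS | RTSER_SET_STOP_BITS |
+ *			       RTSER_SET_HANDSHAKE | RTSER_SET_TIMEOUT_RX,
+ *		.baud_rate = 115200,
+ *		.parity = RTSER_NO_PARITY,
+ *		.data_bits = RTSER_8_BITS,
+ *		.stop_bits = RTSER_1_STOPB,
+ *		.handshake = RTSER_NO_HAND,
+ *		.rx_timeout = 1000000000,	// 1 s, in nanoseconds
+ *	};
+ *	int fd = open("/dev/rtdm/rtser0", O_RDWR);
+ *
+ *	if (fd < 0)
+ *		return -1;
+ *	if (ioctl(fd, RTSER_RTIOC_SET_CONFIG, &cfg)) {
+ *		close(fd);
+ *		return -1;
+ *	}
+ *	return fd;	// ready for read()/write()
+ * }
+ * @endcode
+ */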
+
+/**
+ * Serial device status
+ */
+typedef struct rtser_status {
+	/** line status register, see @ref RTSER_LSR_xxx */
+	int		line_status;
+
+	/** modem status register, see @ref RTSER_MSR_xxx */
+	int		modem_status;
+} rtser_status_t;
+
+/**
+ * Additional information about serial device events
+ */
+typedef struct rtser_event {
+	/** signalled events, see @ref RTSER_EVENT_xxx */
+	int		events;
+
+	/** number of pending input characters */
+	int		rx_pending;
+
+	/** last interrupt timestamp */
+	nanosecs_abs_t	last_timestamp;
+
+	/** reception timestamp of oldest character in input queue */
+	nanosecs_abs_t	rxpend_timestamp;
+} rtser_event_t;
+
+
+#define RTIOC_TYPE_SERIAL		RTDM_CLASS_SERIAL
+
+
+/*!
+ * @name Sub-Classes of RTDM_CLASS_SERIAL
+ * @{ */
+#define RTDM_SUBCLASS_16550A		0
+/** @} */
+
+
+/*!
+ * @anchor SERIOCTLs @name IOCTLs
+ * Serial device IOCTLs
+ * @{ */
+
+/**
+ * Get serial device configuration
+ *
+ * @param[out] arg Pointer to configuration buffer (struct rtser_config)
+ *
+ * @return 0 on success, otherwise negative error code
+ *
+ * @coretags{task-unrestricted}
+ */
+#define RTSER_RTIOC_GET_CONFIG	\
+	_IOR(RTIOC_TYPE_SERIAL, 0x00, struct rtser_config)
+
+/**
+ * Set serial device configuration
+ *
+ * @param[in] arg Pointer to configuration buffer (struct rtser_config)
+ *
+ * @return 0 on success, otherwise:
+ *
+ * - -EPERM is returned if the caller's context is invalid, see note below.
+ *
+ * - -ENOMEM is returned if a new history buffer for timestamps cannot be
+ * allocated.
+ *
+ * @coretags{task-unrestricted}
+ *
+ * @note If rtser_config contains a valid timestamp_history and the
+ * addressed device has been opened in non-real-time context, this IOCTL must
+ * be issued in non-real-time context as well. Otherwise, this command will
+ * fail.
+ */
+#define RTSER_RTIOC_SET_CONFIG	\
+	_IOW(RTIOC_TYPE_SERIAL, 0x01, struct rtser_config)
+
+/**
+ * Get serial device status
+ *
+ * @param[out] arg Pointer to status buffer (struct rtser_status)
+ *
+ * @return 0 on success, otherwise negative error code
+ *
+ * @coretags{task-unrestricted}
+ *
+ * @note The error states @c RTSER_LSR_OVERRUN_ERR, @c RTSER_LSR_PARITY_ERR,
+ * @c RTSER_LSR_FRAMING_ERR, and @c RTSER_SOFT_OVERRUN_ERR that may have
+ * occurred during previous read accesses to the device will be saved for being
+ * reported via this IOCTL. Upon return from @c RTSER_RTIOC_GET_STATUS, the
+ * saved state will be cleared.
+ */
+#define RTSER_RTIOC_GET_STATUS	\
+	_IOR(RTIOC_TYPE_SERIAL, 0x02, struct rtser_status)
+
+/**
+ * Get serial device's modem control register
+ *
+ * @param[out] arg Pointer to variable receiving the content (int, see
+ *             @ref RTSER_MCR_xxx)
+ *
+ * @return 0 on success, otherwise negative error code
+ *
+ * @coretags{task-unrestricted}
+ */
+#define RTSER_RTIOC_GET_CONTROL	\
+	_IOR(RTIOC_TYPE_SERIAL, 0x03, int)
+
+/**
+ * Set serial device's modem control register
+ *
+ * @param[in] arg New control register content (int, see @ref RTSER_MCR_xxx)
+ *
+ * @return 0 on success, otherwise negative error code
+ *
+ * @coretags{task-unrestricted}
+ */
+#define RTSER_RTIOC_SET_CONTROL	\
+	_IOW(RTIOC_TYPE_SERIAL, 0x04, int)
+
+/**
+ * Wait on serial device events according to previously set mask
+ *
+ * @param[out] arg Pointer to event information buffer (struct rtser_event)
+ *
+ * @return 0 on success, otherwise:
+ *
+ * - -EBUSY is returned if another task is already waiting on events of this
+ * device.
+ *
+ * - -EBADF is returned if the file descriptor is invalid or the device has
+ * just been closed.
+ *
+ * @coretags{mode-unrestricted}
+ */
+#define RTSER_RTIOC_WAIT_EVENT	\
+	_IOR(RTIOC_TYPE_SERIAL, 0x05, struct rtser_event)
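+
+/*
+ * Editor's sketch (not part of the original profile): enabling the
+ * RX event and blocking until data is pending. The open file
+ * descriptor is assumed to refer to a 16550A-class device, and the
+ * <rtdm/serial.h> userland include is an assumption.
+ *
+ * @code
+ * #include <sys/ioctl.h>
+ * #include <rtdm/serial.h>
+ *
+ * int wait_for_rx(int fd)
+ * {
+ *	struct rtser_config cfg = {
+ *		.config_mask = RTSER_SET_EVENT_MASK,
+ *		.event_mask = RTSER_EVENT_RXPEND,
+ *	};
+ *	struct rtser_event ev;
+ *
+ *	if (ioctl(fd, RTSER_RTIOC_SET_CONFIG, &cfg))
+ *		return -1;
+ *
+ *	if (ioctl(fd, RTSER_RTIOC_WAIT_EVENT, &ev))
+ *		return -1;
+ *
+ *	return ev.rx_pending;	// number of characters ready to read
+ * }
+ * @endcode
+ */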
+/** @} */
+
+/**
+ * Set or clear break on UART output line
+ *
+ * @param[in] arg @c RTSER_BREAK_SET or @c RTSER_BREAK_CLR (int)
+ *
+ * @return 0 on success, otherwise negative error code
+ *
+ * @coretags{task-unrestricted}
+ *
+ * @note A set break condition may also be cleared on UART line
+ * reconfiguration.
+ */
+#define RTSER_RTIOC_BREAK_CTL	\
+	_IOR(RTIOC_TYPE_SERIAL, 0x06, int)
+/** @} */
+
+/*!
+ * @anchor SERutils @name RT Serial example and utility programs
+ * @{ */
+/** @example cross-link.c */
+/** @} */
+
+#endif /* !_RTDM_UAPI_SERIAL_H */
+++ linux-patched/include/xenomai/rtdm/uapi/rtdm.h	2022-03-21 12:58:32.254861024 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/include/xenomai/rtdm/uapi/gpiopwm.h	1970-01-01 01:00:00.000000000 +0100
+/**
+ * @file
+ * Real-Time Driver Model for Xenomai, user API header.
+ *
+ * @note Copyright (C) 2005, 2006 Jan Kiszka <jan.kiszka@web.de>
+ * @note Copyright (C) 2005 Joerg Langenberg <joerg.langenberg@gmx.net>
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA.
+ * @ingroup rtdm_user_api
+ */
+#ifndef _RTDM_UAPI_RTDM_H
+#define _RTDM_UAPI_RTDM_H
+
+/*!
+ * @addtogroup rtdm
+ * @{
+ */
+
+/*!
+ * @anchor rtdm_api_versioning @name API Versioning
+ * @{ */
+/** Common user and driver API version */
+#define RTDM_API_VER			9
+
+/** Minimum API revision compatible with the current release */
+#define RTDM_API_MIN_COMPAT_VER		9
+/** @} API Versioning */
+
+/** RTDM type for representing absolute dates. Its base type is a 64 bit
+ *  unsigned integer. The unit is 1 nanosecond. */
+typedef uint64_t nanosecs_abs_t;
+
+/** RTDM type for representing relative intervals. Its base type is a 64 bit
+ *  signed integer. The unit is 1 nanosecond. Relative intervals can also
+ *  encode the special timeouts "infinite" and "non-blocking", see
+ *  @ref RTDM_TIMEOUT_xxx. */
+typedef int64_t nanosecs_rel_t;
+
+/*!
+ * @anchor RTDM_TIMEOUT_xxx @name RTDM_TIMEOUT_xxx
+ * Special timeout values
+ * @{ */
+/** Block forever. */
+#define RTDM_TIMEOUT_INFINITE		0
+
+/** Any negative timeout means non-blocking. */
+#define RTDM_TIMEOUT_NONE		(-1)
+/** @} RTDM_TIMEOUT_xxx */
+/** @} rtdm */
+
+/*!
+ * @addtogroup rtdm_profiles
+ * @{
+ */
+
+/*!
+ * @anchor RTDM_CLASS_xxx   @name RTDM_CLASS_xxx
+ * Device classes
+ * @{ */
+#define RTDM_CLASS_PARPORT		1
+#define RTDM_CLASS_SERIAL		2
+#define RTDM_CLASS_CAN			3
+#define RTDM_CLASS_NETWORK		4
+#define RTDM_CLASS_RTMAC		5
+#define RTDM_CLASS_TESTING		6
+#define RTDM_CLASS_RTIPC		7
+#define RTDM_CLASS_COBALT		8
+#define RTDM_CLASS_UDD			9
+#define RTDM_CLASS_MEMORY		10
+#define RTDM_CLASS_GPIO			11
+#define RTDM_CLASS_SPI			12
+#define RTDM_CLASS_PWM			13
+
+#define RTDM_CLASS_MISC			223
+#define RTDM_CLASS_EXPERIMENTAL		224
+#define RTDM_CLASS_MAX			255
+/** @} RTDM_CLASS_xxx */
+
+#define RTDM_SUBCLASS_GENERIC		(-1)
+
+#define RTIOC_TYPE_COMMON		0
+
+/*!
+ * @anchor device_naming    @name Device Naming
+ * Maximum length of device names (excluding the final null character)
+ * @{
+ */
+#define RTDM_MAX_DEVNAME_LEN		31
+/** @} Device Naming */
+
+/**
+ * Device information
+ */
+typedef struct rtdm_device_info {
+	/** Device flags, see @ref dev_flags "Device Flags" for details */
+	int device_flags;
+
+	/** Device class ID, see @ref RTDM_CLASS_xxx */
+	int device_class;
+
+	/** Device sub-class, either RTDM_SUBCLASS_GENERIC or a
+	 *  RTDM_SUBCLASS_xxx definition of the related @ref rtdm_profiles
+	 *  "Device Profile" */
+	int device_sub_class;
+
+	/** Supported device profile version */
+	int profile_version;
+} rtdm_device_info_t;
+
+/*!
+ * @anchor RTDM_PURGE_xxx_BUFFER    @name RTDM_PURGE_xxx_BUFFER
+ * Flags selecting buffers to be purged
+ * @{ */
+#define RTDM_PURGE_RX_BUFFER		0x0001
+#define RTDM_PURGE_TX_BUFFER		0x0002
+/** @} RTDM_PURGE_xxx_BUFFER*/
+
+/*!
+ * @anchor common_IOCTLs    @name Common IOCTLs
+ * The following IOCTLs are common to all device rtdm_profiles.
+ * @{
+ */
+
+/**
+ * Retrieve information about a device or socket.
+ * @param[out] arg Pointer to information buffer (struct rtdm_device_info)
+ */
+#define RTIOC_DEVICE_INFO \
+	_IOR(RTIOC_TYPE_COMMON, 0x00, struct rtdm_device_info)
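+
+/*
+ * Editor's sketch (not part of the original header): probing the
+ * class of an open RTDM file descriptor. The <rtdm/rtdm.h> userland
+ * include is an assumption.
+ *
+ * @code
+ * #include <sys/ioctl.h>
+ * #include <rtdm/rtdm.h>
+ *
+ * int is_serial_device(int fd)
+ * {
+ *	struct rtdm_device_info info;
+ *
+ *	if (ioctl(fd, RTIOC_DEVICE_INFO, &info))
+ *		return 0;
+ *
+ *	return info.device_class == RTDM_CLASS_SERIAL;
+ * }
+ * @endcode
+ */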
+
+/**
+ * Purge internal device or socket buffers.
+ * @param[in] arg Purge mask, see @ref RTDM_PURGE_xxx_BUFFER
+ */
+#define RTIOC_PURGE		_IOW(RTIOC_TYPE_COMMON, 0x10, int)
+/** @} Common IOCTLs */
+/** @} rtdm */
+
+/* Internally used for mapping socket functions on IOCTLs */
+struct _rtdm_getsockopt_args {
+	int level;
+	int optname;
+	void *optval;
+	socklen_t *optlen;
+};
+
+struct _rtdm_setsockopt_args {
+	int level;
+	int optname;
+	const void *optval;
+	socklen_t optlen;
+};
+
+struct _rtdm_getsockaddr_args {
+	struct sockaddr *addr;
+	socklen_t *addrlen;
+};
+
+struct _rtdm_setsockaddr_args {
+	const struct sockaddr *addr;
+	socklen_t addrlen;
+};
+
+#define _RTIOC_GETSOCKOPT	_IOW(RTIOC_TYPE_COMMON, 0x20,		\
+				     struct _rtdm_getsockopt_args)
+#define _RTIOC_SETSOCKOPT	_IOW(RTIOC_TYPE_COMMON, 0x21,		\
+				     struct _rtdm_setsockopt_args)
+#define _RTIOC_BIND		_IOW(RTIOC_TYPE_COMMON, 0x22,		\
+				     struct _rtdm_setsockaddr_args)
+#define _RTIOC_CONNECT		_IOW(RTIOC_TYPE_COMMON, 0x23,		\
+				     struct _rtdm_setsockaddr_args)
+#define _RTIOC_LISTEN		_IOW(RTIOC_TYPE_COMMON, 0x24,		\
+				     int)
+#define _RTIOC_ACCEPT		_IOW(RTIOC_TYPE_COMMON, 0x25,		\
+				     struct _rtdm_getsockaddr_args)
+#define _RTIOC_GETSOCKNAME	_IOW(RTIOC_TYPE_COMMON, 0x26,		\
+				     struct _rtdm_getsockaddr_args)
+#define _RTIOC_GETPEERNAME	_IOW(RTIOC_TYPE_COMMON, 0x27,		\
+				     struct _rtdm_getsockaddr_args)
+#define _RTIOC_SHUTDOWN		_IOW(RTIOC_TYPE_COMMON, 0x28,		\
+				     int)
+
+/* Internally used for mmap() */
+struct _rtdm_mmap_request {
+	__u64 offset;
+	size_t length;
+	int prot;
+	int flags;
+};
+
+#endif /* !_RTDM_UAPI_RTDM_H */
+++ linux-patched/include/xenomai/rtdm/uapi/gpiopwm.h	2022-03-21 12:58:32.247861092 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/include/xenomai/rtdm/uapi/can.h	1970-01-01 01:00:00.000000000 +0100
+/**
+ * @file
+ * Real-Time Driver Model for Xenomai, pwm header
+ *
+ * @note Copyright (C) 2015 Jorge Ramirez <jro@xenomai.org>
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA.
+ *
+ * @ingroup rttesting
+ */
+#ifndef _RTDM_UAPI_PWM_H
+#define _RTDM_UAPI_PWM_H
+
+#include <linux/types.h>
+
+#define RTPWM_PROFILE_VER			1
+
+struct gpiopwm {
+	unsigned int duty_cycle;
+	unsigned int range_min;
+	unsigned int range_max;
+	unsigned int period;
+	unsigned int gpio;
+};
+
+#define RTIOC_TYPE_PWM		RTDM_CLASS_PWM
+
+#define GPIOPWM_RTIOC_SET_CONFIG \
+	_IOW(RTIOC_TYPE_PWM, 0x00, struct gpiopwm)
+
+#define GPIOPWM_RTIOC_GET_CONFIG \
+	_IOR(RTIOC_TYPE_PWM, 0x10, struct gpiopwm)
+
+#define GPIOPWM_RTIOC_START \
+	_IO(RTIOC_TYPE_PWM, 0x20)
+
+#define GPIOPWM_RTIOC_STOP \
+	_IO(RTIOC_TYPE_PWM, 0x30)
+
+#define GPIOPWM_RTIOC_CHANGE_DUTY_CYCLE \
+	_IOW(RTIOC_TYPE_PWM, 0x40, unsigned int)
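+
+/*
+ * Editor's sketch (not part of the original profile): configuring and
+ * starting a PWM channel. The device node name "/dev/rtdm/gpiopwm0",
+ * the <rtdm/gpiopwm.h> userland include, the nanosecond unit of the
+ * period/range fields and the percent interpretation of duty_cycle
+ * are assumptions.
+ *
+ * @code
+ * #include <fcntl.h>
+ * #include <unistd.h>
+ * #include <sys/ioctl.h>
+ * #include <rtdm/gpiopwm.h>
+ *
+ * int drive_servo(void)
+ * {
+ *	struct gpiopwm conf = {
+ *		.gpio = 18,		// board-specific pin number (assumed)
+ *		.period = 20000000,	// 20 ms period
+ *		.range_min = 1000000,	// duty cycle mapped onto [1 ms, 2 ms]
+ *		.range_max = 2000000,
+ *		.duty_cycle = 50,	// mid position
+ *	};
+ *	int fd = open("/dev/rtdm/gpiopwm0", O_RDWR);
+ *
+ *	if (fd < 0)
+ *		return -1;
+ *
+ *	if (ioctl(fd, GPIOPWM_RTIOC_SET_CONFIG, &conf) ||
+ *	    ioctl(fd, GPIOPWM_RTIOC_START)) {
+ *		close(fd);
+ *		return -1;
+ *	}
+ *
+ *	sleep(5);			// emit the waveform for a while
+ *	ioctl(fd, GPIOPWM_RTIOC_STOP);
+ *	return close(fd);
+ * }
+ * @endcode
+ */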
+
+
+#endif /* !_RTDM_UAPI_TESTING_H */
+++ linux-patched/include/xenomai/rtdm/uapi/can.h	2022-03-21 12:58:32.240861160 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/include/xenomai/rtdm/uapi/net.h	1970-01-01 01:00:00.000000000 +0100
+/**
+ * @file
+ * Real-Time Driver Model for RT-Socket-CAN, CAN device profile header
+ *
+ * @note Copyright (C) 2006 Wolfgang Grandegger <wg@grandegger.com>
+ *
+ * @note Copyright (C) 2005, 2006 Sebastian Smolorz
+ *                         <Sebastian.Smolorz@stud.uni-hannover.de>
+ *
+ * This RTDM CAN device profile header is based on:
+ *
+ * include/linux/can.h, include/linux/socket.h, net/can/pf_can.h in
+ * linux-can.patch, a CAN socket framework for Linux
+ *
+ * Copyright (C) 2004, 2005,
+ * Robert Schwebel, Benedikt Spranger, Marc Kleine-Budde, Pengutronix
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ *
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software Foundation,
+ * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+#ifndef _RTDM_UAPI_CAN_H
+#define _RTDM_UAPI_CAN_H
+
+/**
+ * @addtogroup rtdm_can
+ * @{
+ */
+
+#define RTCAN_PROFILE_VER  2
+
+#ifndef AF_CAN
+
+/** CAN address family */
+#define AF_CAN	29
+
+/** CAN protocol family */
+#define PF_CAN	AF_CAN
+
+#endif
+
+/** CAN socket levels
+ *
+ *  Used for @ref Sockopts for the particular protocols.
+ */
+#define SOL_CAN_RAW  103
+
+/** Type of CAN id (see @ref CAN_xxx_MASK and @ref CAN_xxx_FLAG) */
+typedef uint32_t can_id_t;
+typedef uint32_t canid_t;
+
+/** Type of CAN error mask */
+typedef can_id_t can_err_mask_t;
+
+/*!
+ * @anchor CAN_xxx_MASK @name CAN ID masks
+ * Bit masks for masking CAN IDs
+ * @{ */
+
+/** Bit mask for extended CAN IDs */
+#define CAN_EFF_MASK  0x1FFFFFFF
+
+/** Bit mask for standard CAN IDs */
+#define CAN_SFF_MASK  0x000007FF
+
+/** @} */
+
+/*!
+ * @anchor CAN_xxx_FLAG @name CAN ID flags
+ * Flags within a CAN ID indicating special CAN frame attributes
+ * @{ */
+/** Extended frame */
+#define CAN_EFF_FLAG  0x80000000
+/** Remote transmission frame */
+#define CAN_RTR_FLAG  0x40000000
+/** Error frame (see @ref Errors), not valid in struct can_filter */
+#define CAN_ERR_FLAG  0x20000000
+/** Invert CAN filter definition, only valid in struct can_filter */
+#define CAN_INV_FILTER CAN_ERR_FLAG
+
+/** @} */
+
+/*!
+ * @anchor CAN_PROTO @name Particular CAN protocols
+ * Possible protocols for the PF_CAN protocol family
+ *
+ * Currently only the RAW protocol is supported.
+ * @{ */
+/** Raw protocol of @c PF_CAN, applicable to socket type @c SOCK_RAW */
+#define CAN_RAW  1
+/** @} */
+
+#define CAN_BAUDRATE_UNKNOWN       ((uint32_t)-1)
+#define CAN_BAUDRATE_UNCONFIGURED  0
+
+/**
+ * Baudrate definition in bits per second
+ */
+typedef uint32_t can_baudrate_t;
+
+/**
+ * Supported CAN bit-time types
+ */
+enum CAN_BITTIME_TYPE {
+	/** Standard bit-time definition according to Bosch */
+	CAN_BITTIME_STD,
+	/** Hardware-specific BTR bit-time definition */
+	CAN_BITTIME_BTR
+};
+
+/**
+ * See @ref CAN_BITTIME_TYPE
+ */
+typedef enum CAN_BITTIME_TYPE can_bittime_type_t;
+
+/**
+ * Standard bit-time parameters according to Bosch
+ */
+struct can_bittime_std {
+	uint32_t brp;		/**< Baud rate prescaler */
+	uint8_t prop_seg;	/**< from 1 to 8 */
+	uint8_t phase_seg1;	/**< from 1 to 8 */
+	uint8_t phase_seg2;	/**< from 1 to 8 */
+	uint8_t sjw:7;		/**< from 1 to 4 */
+	uint8_t sam:1;		/**< 1 - enable triple sampling */
+};
+
+/**
+ * Hardware-specific BTR bit-times
+ */
+struct can_bittime_btr {
+
+	uint8_t btr0;		/**< Bus timing register 0 */
+	uint8_t btr1;		/**< Bus timing register 1 */
+};
+
+/**
+ * Custom CAN bit-time definition
+ */
+struct can_bittime {
+	/** Type of bit-time definition */
+	can_bittime_type_t type;
+
+	union {
+		/** Standard bit-time */
+		struct can_bittime_std std;
+		/** Hardware-specific BTR bit-time */
+		struct can_bittime_btr btr;
+	};
+};
+
+/*!
+ * @anchor CAN_MODE @name CAN operation modes
+ * Modes into which CAN controllers can be set
+ * @{ */
+enum CAN_MODE {
+	/*! Set controller in Stop mode (no reception / transmission possible) */
+	CAN_MODE_STOP = 0,
+
+	/*! Set controller into normal operation. @n
+	 *  Coming from stopped mode or bus off, the controller begins with no
+	 *  errors in @ref CAN_STATE_ACTIVE. */
+	CAN_MODE_START,
+
+	/*! Set controller into Sleep mode. @n
+	 *  This is only possible if the controller is not stopped or bus-off. @n
+	 *  Notice that sleep mode will only be entered when there is no bus
+	 *  activity. If the controller detects bus activity while "sleeping"
+	 *  it will go into operating mode again. @n
+	 *  To actively leave sleep mode again trigger @c CAN_MODE_START. */
+	CAN_MODE_SLEEP
+};
+/** @} */
+
+/** See @ref CAN_MODE */
+typedef enum CAN_MODE can_mode_t;
+
+/*!
+ * @anchor CAN_CTRLMODE @name CAN controller modes
+ * Special CAN controllers modes, which can be or'ed together.
+ *
+ * @note These modes are hardware-dependent. Please consult the hardware
+ * manual of the CAN controller for more detailed information.
+ *
+ * @{ */
+
+/*! Listen-Only mode
+ *
+ *  In this mode the CAN controller gives no acknowledgement on the CAN bus,
+ *  even if a message is received successfully, and it does not transmit
+ *  messages. This mode might be useful for bus monitoring, hot-plugging
+ *  or throughput analysis. */
+#define CAN_CTRLMODE_LISTENONLY 0x1
+
+/*! Loopback mode
+ *
+ * In this mode the CAN controller does an internal loop-back, a message is
+ * transmitted and simultaneously received. That mode can be used for self
+ * test operation. */
+#define CAN_CTRLMODE_LOOPBACK   0x2
+
+/*! Triple sampling mode
+ *
+ * In this mode the CAN controller uses Triple sampling. */
+#define CAN_CTRLMODE_3_SAMPLES  0x4
+
+/** @} */
+
+/** See @ref CAN_CTRLMODE */
+typedef int can_ctrlmode_t;
+
+/*!
+ * @anchor CAN_STATE @name CAN controller states
+ * States a CAN controller can be in.
+ * @{ */
+enum CAN_STATE {
+	/** CAN controller is error active */
+	CAN_STATE_ERROR_ACTIVE = 0,
+	/** CAN controller is active */
+	CAN_STATE_ACTIVE = 0,
+
+	/** CAN controller is error active, warning level is reached */
+	CAN_STATE_ERROR_WARNING = 1,
+	/** CAN controller is error active, warning level is reached */
+	CAN_STATE_BUS_WARNING = 1,
+
+	/** CAN controller is error passive */
+	CAN_STATE_ERROR_PASSIVE = 2,
+	/** CAN controller is error passive */
+	CAN_STATE_BUS_PASSIVE = 2,
+
+	/** CAN controller went into Bus Off */
+	CAN_STATE_BUS_OFF,
+
+	/** CAN controller is scanning to get the baudrate */
+	CAN_STATE_SCANNING_BAUDRATE,
+
+	/** CAN controller is in stopped mode */
+	CAN_STATE_STOPPED,
+
+	/** CAN controller is in Sleep mode */
+	CAN_STATE_SLEEPING,
+};
+/** @} */
+
+/** See @ref CAN_STATE */
+typedef enum CAN_STATE can_state_t;
+
+#define CAN_STATE_OPERATING(state) ((state) < CAN_STATE_BUS_OFF)
+
+/**
+ * Filter for reception of CAN messages.
+ *
+ * This filter works as follows:
+ * A received CAN ID is AND'ed bitwise with @c can_mask and then compared to
+ * @c can_id. This also includes the @ref CAN_EFF_FLAG and @ref CAN_RTR_FLAG
+ * of @ref CAN_xxx_FLAG. If this comparison is true, the message will be
+ * received by the socket. The logic can be inverted with the @c can_id flag
+ * @ref CAN_INV_FILTER :
+ *
+ * @code
+ * if (can_id & CAN_INV_FILTER) {
+ *    if ((received_can_id & can_mask) != (can_id & ~CAN_INV_FILTER))
+ *       accept-message;
+ * } else {
+ *    if ((received_can_id & can_mask) == can_id)
+ *       accept-message;
+ * }
+ * @endcode
+ *
+ * Multiple filters can be arranged in a filter list and set with
+ * @ref Sockopts. If one of these filters matches a CAN ID upon reception
+ * of a CAN frame, this frame is accepted.
+ *
+ */
+typedef struct can_filter {
+	/** CAN ID which must match with incoming IDs after passing the mask.
+	 *  The filter logic can be inverted with the flag @ref CAN_INV_FILTER. */
+	uint32_t can_id;
+
+	/** Mask which is applied to incoming IDs. See @ref CAN_xxx_MASK
+	 *  "CAN ID masks" if exactly one CAN ID should come through. */
+	uint32_t can_mask;
+} can_filter_t;
+
+/**
+ * Socket address structure for the CAN address family
+ */
+struct sockaddr_can {
+	/** CAN address family, must be @c AF_CAN */
+	sa_family_t can_family;
+
+	/** Interface index of CAN controller. See @ref SIOCGIFINDEX. */
+	int can_ifindex;
+};
+
+/**
+ * Raw CAN frame
+ *
+ * Central structure for receiving and sending CAN frames.
+ */
+typedef struct can_frame {
+	/** CAN ID of the frame
+	 *
+	 *  See @ref CAN_xxx_FLAG "CAN ID flags" for special bits.
+	 */
+	can_id_t can_id;
+
+	/** Size of the payload in bytes */
+	uint8_t can_dlc;
+
+	/** Payload data bytes */
+	uint8_t data[8] __attribute__ ((aligned(8)));
+} can_frame_t;
+
+/**
+ * CAN interface request descriptor
+ *
+ * Parameter block for submitting CAN control requests.
+ */
+struct can_ifreq {
+	union {
+		char	ifrn_name[IFNAMSIZ];
+	} ifr_ifrn;
+	
+	union {
+		struct can_bittime bittime;
+		can_baudrate_t baudrate;
+		can_ctrlmode_t ctrlmode;
+		can_mode_t mode;
+		can_state_t state;
+		int ifru_ivalue;
+	} ifr_ifru;
+};
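+
+/*
+ * Editor's sketch (not part of the original profile): opening a raw
+ * CAN socket, resolving the interface index with SIOCGIFINDEX,
+ * binding and sending one frame. The interface name "rtcan0" and the
+ * <rtdm/can.h> userland include are assumptions.
+ *
+ * @code
+ * #include <string.h>
+ * #include <net/if.h>
+ * #include <sys/socket.h>
+ * #include <sys/ioctl.h>
+ * #include <rtdm/can.h>
+ *
+ * int send_one_frame(void)
+ * {
+ *	struct can_ifreq ifr;
+ *	struct sockaddr_can addr;
+ *	struct can_frame frame;
+ *	int s;
+ *
+ *	s = socket(PF_CAN, SOCK_RAW, CAN_RAW);
+ *	if (s < 0)
+ *		return -1;
+ *
+ *	memset(&ifr, 0, sizeof(ifr));
+ *	strncpy(ifr.ifr_ifrn.ifrn_name, "rtcan0", IFNAMSIZ - 1);
+ *	if (ioctl(s, SIOCGIFINDEX, &ifr))	// resolve interface index
+ *		return -1;
+ *
+ *	memset(&addr, 0, sizeof(addr));
+ *	addr.can_family = AF_CAN;
+ *	addr.can_ifindex = ifr.ifr_ifru.ifru_ivalue;
+ *	if (bind(s, (struct sockaddr *)&addr, sizeof(addr)))
+ *		return -1;
+ *
+ *	memset(&frame, 0, sizeof(frame));
+ *	frame.can_id = 0x123;			// standard frame
+ *	frame.can_dlc = 2;
+ *	frame.data[0] = 0xde;
+ *	frame.data[1] = 0xad;
+ *
+ *	return send(s, &frame, sizeof(frame), 0) < 0 ? -1 : 0;
+ * }
+ * @endcode
+ */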
+
+/*!
+ * @anchor RTCAN_TIMESTAMPS   @name Timestamp switches
+ * Arguments to pass to @ref RTCAN_RTIOC_TAKE_TIMESTAMP
+ * @{ */
+#define RTCAN_TAKE_NO_TIMESTAMPS	0  /**< Switch off taking timestamps */
+#define RTCAN_TAKE_TIMESTAMPS		1  /**< Do take timestamps */
+/** @} */
+
+#define RTIOC_TYPE_CAN  RTDM_CLASS_CAN
+
+/*!
+ * @anchor Rawsockopts @name RAW socket options
+ * Setting and getting CAN RAW socket options.
+ * @{ */
+
+/**
+ * CAN filter definition
+ *
+ * A CAN raw filter list with elements of struct can_filter can be installed
+ * with @c setsockopt. This list is used upon reception of CAN frames to
+ * decide whether the bound socket will receive a frame. An empty filter list
+ * can also be defined using optlen = 0, which is recommended for write-only
+ * sockets.
+ * @n
+ * If the socket was already bound with @ref Bind, the old filter list
+ * gets replaced with the new one. Be aware that already received, but
+ * not read out CAN frames may stay in the socket buffer.
+ * @n
+ * @n
+ * @param [in] level @b SOL_CAN_RAW
+ *
+ * @param [in] optname @b CAN_RAW_FILTER
+ *
+ * @param [in] optval Pointer to array of struct can_filter.
+ *
+ * @param [in] optlen Size of filter list: count * sizeof( struct can_filter).
+ * @n
+ * @coretags{task-unrestricted}
+ * @n
+ * Specific return values:
+ * - -EFAULT (It was not possible to access user space memory area at the
+ *            specified address.)
+ * - -ENOMEM (Not enough memory to fulfill the operation)
+ * - -EINVAL (Invalid length "optlen")
+ * - -ENOSPC (No space to store filter list, check RT-Socket-CAN kernel
+ *            parameters)
+ * .
+ */
+#define CAN_RAW_FILTER		0x1
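+
+/*
+ * Editor's sketch (not part of the original profile): installing a
+ * two-element filter list on a bound CAN socket. The ID/mask values
+ * are arbitrary and the <rtdm/can.h> userland include is an
+ * assumption.
+ *
+ * @code
+ * #include <sys/socket.h>
+ * #include <rtdm/can.h>
+ *
+ * int install_filters(int s)
+ * {
+ *	struct can_filter flist[2] = {
+ *		// accept the standard frame with ID 0x123 only
+ *		{ .can_id = 0x123,
+ *		  .can_mask = CAN_SFF_MASK | CAN_EFF_FLAG },
+ *		// accept any extended frame whose low byte is 0x42
+ *		{ .can_id = 0x42 | CAN_EFF_FLAG,
+ *		  .can_mask = 0xff | CAN_EFF_FLAG },
+ *	};
+ *
+ *	return setsockopt(s, SOL_CAN_RAW, CAN_RAW_FILTER,
+ *			  flist, sizeof(flist));
+ * }
+ * @endcode
+ */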
+
+/**
+ * CAN error mask
+ *
+ * A CAN error mask (see @ref Errors) can be set with @c setsockopt. This
+ * mask is then used to decide if error frames are delivered to this socket
+ * in case of error conditions. The error frames are marked with the
+ * @ref CAN_ERR_FLAG of @ref CAN_xxx_FLAG and must be handled by the
+ * application properly. A detailed description of the errors can be
+ * found in the @c can_id and the @c data fields of struct can_frame
+ * (see @ref Errors for further details).
+ *
+ * @n
+ * @param [in] level @b SOL_CAN_RAW
+ *
+ * @param [in] optname @b CAN_RAW_ERR_FILTER
+ *
+ * @param [in] optval Pointer to error mask of type can_err_mask_t.
+ *
+ * @param [in] optlen Size of error mask: sizeof(can_err_mask_t).
+ *
+ * @coretags{task-unrestricted}
+ * @n
+ * Specific return values:
+ * - -EFAULT (It was not possible to access user space memory area at the
+ *            specified address.)
+ * - -EINVAL (Invalid length "optlen")
+ * .
+ */
+#define CAN_RAW_ERR_FILTER	0x2
+
+/**
+ * CAN TX loopback
+ *
+ * The TX loopback to other local sockets can be selected with this
+ * @c setsockopt.
+ *
+ * @note The TX loopback feature must be enabled in the kernel and then
+ * the loopback to other local TX sockets is enabled by default.
+ *
+ * @n
+ * @param [in] level @b SOL_CAN_RAW
+ *
+ * @param [in] optname @b CAN_RAW_LOOPBACK
+ *
+ * @param [in] optval Pointer to integer value.
+ *
+ * @param [in] optlen Size of int: sizeof(int).
+ *
+ * @coretags{task-unrestricted}
+ * @n
+ * Specific return values:
+ * - -EFAULT (It was not possible to access user space memory area at the
+ *            specified address.)
+ * - -EINVAL (Invalid length "optlen")
+ * - -EOPNOTSUPP (not supported, check RT-Socket-CAN kernel parameters).
+ */
+#define CAN_RAW_LOOPBACK	0x3
+
+/**
+ * CAN receive own messages
+ *
+ * Not supported by RT-Socket-CAN, but defined for compatibility with
+ * Socket-CAN.
+ */
+#define CAN_RAW_RECV_OWN_MSGS   0x4
+
+/** @} */
+
+/*!
+ * @anchor CANIOCTLs @name IOCTLs
+ * CAN device IOCTLs
+ *
+ * @deprecated Passing \c struct \c ifreq as a request descriptor
+ * for CAN IOCTLs is still accepted for backward compatibility,
+ * however it is recommended to switch to \c struct \c can_ifreq at
+ * the first opportunity.
+ *
+ * @{ */
+
+/**
+ * Get CAN interface index by name
+ *
+ * @param [in,out] arg Pointer to interface request structure buffer
+ *                     (<TT>struct can_ifreq</TT>). If
+ *                     <TT>ifr_name</TT> holds a valid CAN interface
+ *                     name <TT>ifr_ifindex</TT> will be filled with
+ *                     the corresponding interface index.
+ *
+ * @return 0 on success, otherwise:
+ * - -EFAULT: It was not possible to access user space memory area at the
+ *            specified address.
+ * - -ENODEV: No device with specified name exists.
+ *
+ * @coretags{task-unrestricted}
+ */
+#ifdef DOXYGEN_CPP /* For Doxygen only, already defined by kernel headers */
+#define SIOCGIFINDEX defined_by_kernel_header_file
+#endif
+
+/**
+ * Set baud rate
+ *
+ * The baud rate must be specified in bits per second. The driver will
+ * try to calculate reasonable CAN bit-timing parameters. You can use
+ * @ref SIOCSCANCUSTOMBITTIME to set custom bit-timing.
+ *
+ * @param [in] arg Pointer to interface request structure buffer
+ *                 (<TT>struct can_ifreq</TT>).
+ *                 <TT>ifr_name</TT> must hold a valid CAN interface name,
+ *                 <TT>ifr_ifru</TT> must be filled with an instance of
+ *                 @ref can_baudrate_t.
+ *
+ * @return 0 on success, otherwise:
+ * - -EFAULT: It was not possible to access user space memory area at the
+ *            specified address.
+ * - -ENODEV: No device with specified name exists.
+ * - -EINVAL: No valid baud rate, see @ref can_baudrate_t.
+ * - -EDOM  : Baud rate not possible.
+ * - -EAGAIN: Request could not be successfully fulfilled. Try again.
+ *
+ * @coretags{task-unrestricted, might-switch}
+ *
+ * @note Setting the baud rate is a configuration task. It should
+ * be done deliberately or otherwise CAN messages will likely be lost.
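+ *
+ * A configuration sketch; the @c baudrate member name of the
+ * <TT>ifr_ifru</TT> union is an assumption here and should be checked
+ * against the CAN header actually in use:
+ *
+ * @code
+ * struct can_ifreq ifr;
+ *
+ * strncpy(ifr.ifr_name, "rtcan0", IFNAMSIZ);
+ * ifr.ifr_ifru.baudrate = 500000;	// 500 kbit/s
+ *
+ * ioctl(s, SIOCSCANBAUDRATE, &ifr);
+ * @endcode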
+ */
+#define SIOCSCANBAUDRATE	_IOW(RTIOC_TYPE_CAN, 0x01, struct can_ifreq)
+
+/**
+ * Get baud rate
+ *
+ * @param [in,out] arg Pointer to interface request structure buffer
+ *                    (<TT>struct can_ifreq</TT>).
+ *                    <TT>ifr_name</TT> must hold a valid CAN interface name,
+ *                    <TT>ifr_ifru</TT> will be filled with an instance of
+ *                    @ref can_baudrate_t.
+ *
+ * @return 0 on success, otherwise:
+ * - -EFAULT: It was not possible to access user space memory area at the
+ *            specified address.
+ * - -ENODEV: No device with specified name exists.
+ * - -EINVAL: No baud rate was set yet.
+ *
+ * @coretags{task-unrestricted}
+ */
+#define SIOCGCANBAUDRATE	_IOWR(RTIOC_TYPE_CAN, 0x02, struct can_ifreq)
+
+/**
+ * Set custom bit time parameter
+ *
+ * Custom bit-time parameters can be specified in various formats (see
+ * struct can_bittime).
+ *
+ * @param [in] arg Pointer to interface request structure buffer
+ *                 (<TT>struct can_ifreq</TT>).
+ *                 <TT>ifr_name</TT> must hold a valid CAN interface name,
+ *                 <TT>ifr_ifru</TT> must be filled with an instance of
+ *                 struct can_bittime.
+ *
+ * @return 0 on success, otherwise:
+ * - -EFAULT: It was not possible to access user space memory area at the
+ *            specified address.
+ * - -ENODEV: No device with specified name exists.
+ * - -EINVAL: No valid baud rate, see @ref can_baudrate_t.
+ * - -EAGAIN: Request could not be successfully fulfilled. Try again.
+ *
+ * @coretags{task-unrestricted, might-switch}
+ *
+ * @note Setting the bit-time is a configuration task. It should
+ * be done deliberately or otherwise CAN messages will likely be lost.
+ */
+#define SIOCSCANCUSTOMBITTIME	_IOW(RTIOC_TYPE_CAN, 0x03, struct can_ifreq)
+
+/**
+ * Get custom bit-time parameters
+ *
+ * @param [in,out] arg Pointer to interface request structure buffer
+ *                    (<TT>struct can_ifreq</TT>).
+ *                    <TT>ifr_name</TT> must hold a valid CAN interface name,
+ *                    <TT>ifr_ifru</TT> will be filled with an instance of
+ *                    struct can_bittime.
+ *
+ * @return 0 on success, otherwise:
+ * - -EFAULT: It was not possible to access user space memory area at the
+ *            specified address.
+ * - -ENODEV: No device with specified name exists.
+ * - -EINVAL: No baud rate was set yet.
+ *
+ * @coretags{task-unrestricted}
+ */
+#define SIOCGCANCUSTOMBITTIME	_IOWR(RTIOC_TYPE_CAN, 0x04, struct can_ifreq)
+
+/**
+ * Set operation mode of CAN controller
+ *
+ * See @ref CAN_MODE "CAN controller modes" for available modes.
+ *
+ * @param [in] arg Pointer to interface request structure buffer
+ *                 (<TT>struct can_ifreq</TT>).
+ *                 <TT>ifr_name</TT> must hold a valid CAN interface name,
+ *                 <TT>ifr_ifru</TT> must be filled with an instance of
+ *                 @ref can_mode_t.
+ *
+ * @return 0 on success, otherwise:
+ * - -EFAULT: It was not possible to access user space memory area at the
+ *            specified address.
+ * - -ENODEV: No device with specified name exists.
+ * - -EAGAIN: (@ref CAN_MODE_START, @ref CAN_MODE_STOP) Could not successfully
+ *            set mode, hardware is busy. Try again.
+ * - -EINVAL: (@ref CAN_MODE_START) Cannot start controller,
+ *            set baud rate first.
+ * - -ENETDOWN: (@ref CAN_MODE_SLEEP) Cannot go into sleep mode because
+ *            controller is stopped or bus off.
+ * - -EOPNOTSUPP: unknown mode
+ *
+ * @coretags{task-unrestricted, might-switch}
+ *
+ * @note Setting a CAN controller into normal operation after a bus-off can
+ * take some time (128 occurrences of 11 consecutive recessive bits).
+ * In such a case, although this IOCTL will return immediately with success
+ * and @ref SIOCGCANSTATE will report @ref CAN_STATE_ACTIVE,
+ * bus-off recovery may still be in progress. @n
+ * If a controller is bus-off, setting it into stop mode will return no error
+ * but the controller remains bus-off.
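+ *
+ * Restarting a controller after bus-off could look like this (sketch;
+ * the @c mode member name of the <TT>ifr_ifru</TT> union is an
+ * assumption here):
+ *
+ * @code
+ * struct can_ifreq ifr;
+ *
+ * strncpy(ifr.ifr_name, "rtcan0", IFNAMSIZ);
+ * ifr.ifr_ifru.mode = CAN_MODE_START;
+ *
+ * ioctl(s, SIOCSCANMODE, &ifr);
+ * @endcode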
+ */
+#define SIOCSCANMODE		_IOW(RTIOC_TYPE_CAN, 0x05, struct can_ifreq)
+
+/**
+ * Get current state of CAN controller
+ *
+ * States are divided into main states and additional error indicators. A CAN
+ * controller is always in exactly one main state. CAN bus errors are
+ * registered by the CAN hardware and collected by the driver. There is one
+ * error indicator (bit) per error type. If this IOCTL is triggered the error
+ * types which occurred since the last call of this IOCTL are reported and
+ * thereafter the error indicators are cleared. See also
+ * @ref CAN_STATE "CAN controller states".
+ *
+ * @param [in,out] arg Pointer to interface request structure buffer
+ *                    (<TT>struct can_ifreq</TT>).
+ *                    <TT>ifr_name</TT> must hold a valid CAN interface name,
+ *                    <TT>ifr_ifru</TT> will be filled with an instance of
+ *                    @ref can_mode_t.
+ *
+ * @return 0 on success, otherwise:
+ * - -EFAULT: It was not possible to access user space memory area at the
+ *            specified address.
+ * - -ENODEV: No device with specified name exists.
+ *
+ * @coretags{task-unrestricted, might-switch}
+ */
+#define SIOCGCANSTATE		_IOWR(RTIOC_TYPE_CAN, 0x06, struct can_ifreq)
+
+/**
+ * Set special controller modes
+ *
+ * Various special controller modes can be OR'ed together (see
+ * @ref CAN_CTRLMODE for further information).
+ *
+ * @param [in] arg Pointer to interface request structure buffer
+ *                 (<TT>struct can_ifreq</TT>).
+ *                 <TT>ifr_name</TT> must hold a valid CAN interface name,
+ *                 <TT>ifr_ifru</TT> must be filled with an instance of
+ *                 @ref can_ctrlmode_t.
+ *
+ * @return 0 on success, otherwise:
+ * - -EFAULT: It was not possible to access user space memory area at the
+ *            specified address.
+ * - -ENODEV: No device with specified name exists.
+ * - -EINVAL: No valid baud rate, see @ref can_baudrate_t.
+ * - -EAGAIN: Request could not be successfully fulfilled. Try again.
+ *
+ * @coretags{task-unrestricted, might-switch}
+ *
+ * @note Setting special controller modes is a configuration task. It should
+ * be done deliberately or otherwise CAN messages will likely be lost.
+ */
+#define SIOCSCANCTRLMODE	_IOW(RTIOC_TYPE_CAN, 0x07, struct can_ifreq)
+
+/**
+ * Get special controller modes
+ *
+ * @param [in,out] arg Pointer to interface request structure buffer
+ *                 (<TT>struct can_ifreq</TT>).
+ *                 <TT>ifr_name</TT> must hold a valid CAN interface name,
+ *                 <TT>ifr_ifru</TT> will be filled with an instance of
+ *                 @ref can_ctrlmode_t.
+ *
+ * @return 0 on success, otherwise:
+ * - -EFAULT: It was not possible to access user space memory area at the
+ *            specified address.
+ * - -ENODEV: No device with specified name exists.
+ * - -EINVAL: No baud rate was set yet.
+ *
+ * @coretags{task-unrestricted, might-switch}
+ */
+#define SIOCGCANCTRLMODE	_IOWR(RTIOC_TYPE_CAN, 0x08, struct can_ifreq)
+
+/**
+ * Enable or disable storing a high precision timestamp upon reception of
+ * a CAN frame.
+ *
+ * A newly created socket takes no timestamps by default.
+ *
+ * @param [in] arg int variable, see @ref RTCAN_TIMESTAMPS "Timestamp switches"
+ *
+ * @return 0 on success.
+ *
+ * @coretags{task-unrestricted}
+ *
+ * @note Enabling timestamps only affects CAN messages received from the
+ * bus afterwards. Frames already sitting in the socket buffer carry no
+ * timestamp if the feature was disabled when they arrived. See @ref Recv "Receive"
+ * for more details.
+ */
+#define RTCAN_RTIOC_TAKE_TIMESTAMP _IOW(RTIOC_TYPE_CAN, 0x09, int)
+
+/**
+ * Specify a reception timeout for a socket
+ *
+ * Defines a timeout for all receive operations via a
+ * socket which will take effect when one of the @ref Recv "receive functions"
+ * is called without the @c MSG_DONTWAIT flag set.
+ *
+ * The default value for a newly created socket is an infinite timeout.
+ *
+ * @note The setting of the timeout value is not done atomically to avoid
+ * locks. Please set the value before receiving messages from the socket.
+ *
+ * @param [in] arg Pointer to @ref nanosecs_rel_t variable. The value is
+ *                interpreted as relative timeout in nanoseconds in case
+ *                of a positive value.
+ *                See @ref RTDM_TIMEOUT_xxx "Timeouts" for special timeouts.
+ *
+ * @return 0 on success, otherwise:
+ * - -EFAULT: It was not possible to access user space memory area at the
+ *            specified address.
+ *
+ * @coretags{task-unrestricted}
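+ *
+ * Setting a one second receive timeout could look like this (sketch,
+ * POSIX symbol wrapping assumed):
+ *
+ * @code
+ * nanosecs_rel_t timeout = 1000000000;	// 1 s
+ *
+ * ioctl(s, RTCAN_RTIOC_RCV_TIMEOUT, &timeout);
+ * @endcode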
+ */
+#define RTCAN_RTIOC_RCV_TIMEOUT	_IOW(RTIOC_TYPE_CAN, 0x0A, nanosecs_rel_t)
+
+/**
+ * Specify a transmission timeout for a socket
+ *
+ * Defines a timeout for all send operations via a
+ * socket which will take effect when one of the @ref Send "send functions"
+ * is called without the @c MSG_DONTWAIT flag set.
+ *
+ * The default value for a newly created socket is an infinite timeout.
+ *
+ * @note The setting of the timeout value is not done atomically to avoid
+ * locks. Please set the value before sending messages to the socket.
+ *
+ * @param [in] arg Pointer to @ref nanosecs_rel_t variable. The value is
+ *                interpreted as relative timeout in nanoseconds in case
+ *                of a positive value.
+ *                See @ref RTDM_TIMEOUT_xxx "Timeouts" for special timeouts.
+ *
+ * @return 0 on success, otherwise:
+ * - -EFAULT: It was not possible to access user space memory area at the
+ *            specified address.
+ *
+ * @coretags{task-unrestricted}
+ */
+#define RTCAN_RTIOC_SND_TIMEOUT	_IOW(RTIOC_TYPE_CAN, 0x0B, nanosecs_rel_t)
+/** @} */
+
+#define CAN_ERR_DLC  8	/* dlc for error frames */
+
+/*!
+ * @anchor Errors @name Error mask
+ * Error class (mask) in @c can_id field of struct can_frame to
+ * be used with @ref CAN_RAW_ERR_FILTER.
+ *
+ * @b Note: Error reporting is hardware dependent and most CAN controllers
+ * report less detailed error conditions than the SJA1000.
+ *
+ * @b Note: In case of a bus-off error condition (@ref CAN_ERR_BUSOFF), the
+ * CAN controller is @b not restarted automatically. It is the application's
+ * responsibility to react appropriately, e.g. calling @ref CAN_MODE_START.
+ *
+ * @b Note: Bus error interrupts (@ref CAN_ERR_BUSERROR) are enabled when an
+ * application is calling a @ref Recv function on a socket listening
+ * on bus errors (using @ref CAN_RAW_ERR_FILTER). After one bus error has
+ * occurred, the interrupt will be disabled to allow the application time for
+ * error processing and to efficiently avoid bus error interrupt flooding.
+ * @{ */
+
+/** TX timeout (netdevice driver) */
+#define CAN_ERR_TX_TIMEOUT	0x00000001U
+
+/** Lost arbitration (see @ref Error0 "data[0]") */
+#define CAN_ERR_LOSTARB		0x00000002U
+
+/** Controller problems (see @ref Error1 "data[1]") */
+#define CAN_ERR_CRTL		0x00000004U
+
+/** Protocol violations (see @ref Error2 "data[2]",
+			     @ref Error3 "data[3]") */
+#define CAN_ERR_PROT		0x00000008U
+
+/** Transceiver status (see @ref Error4 "data[4]")    */
+#define CAN_ERR_TRX		0x00000010U
+
+/** Received no ACK on transmission */
+#define CAN_ERR_ACK		0x00000020U
+
+/** Bus off */
+#define CAN_ERR_BUSOFF		0x00000040U
+
+/** Bus error (may flood!) */
+#define CAN_ERR_BUSERROR	0x00000080U
+
+/** Controller restarted */
+#define CAN_ERR_RESTARTED	0x00000100U
+
+/** Omit EFF, RTR, ERR flags */
+#define CAN_ERR_MASK		0x1FFFFFFFU
+
+/** @} */
+
+/*!
+ * @anchor Error0 @name Arbitration lost error
+ * Error in the data[0] field of struct can_frame.
+ * @{ */
+/* arbitration lost in bit ... / data[0] */
+#define CAN_ERR_LOSTARB_UNSPEC	0x00 /**< unspecified */
+				     /**< else bit number in bitstream */
+/** @} */
+
+/*!
+ * @anchor Error1 @name Controller problems
+ * Error in the data[1] field of struct can_frame.
+ * @{ */
+/* error status of CAN-controller / data[1] */
+#define CAN_ERR_CRTL_UNSPEC	 0x00 /**< unspecified */
+#define CAN_ERR_CRTL_RX_OVERFLOW 0x01 /**< RX buffer overflow */
+#define CAN_ERR_CRTL_TX_OVERFLOW 0x02 /**< TX buffer overflow */
+#define CAN_ERR_CRTL_RX_WARNING	 0x04 /**< reached warning level for RX errors */
+#define CAN_ERR_CRTL_TX_WARNING	 0x08 /**< reached warning level for TX errors */
+#define CAN_ERR_CRTL_RX_PASSIVE	 0x10 /**< reached passive level for RX errors */
+#define CAN_ERR_CRTL_TX_PASSIVE	 0x20 /**< reached passive level for TX errors */
+/** @} */
+
+/*!
+ * @anchor Error2 @name Protocol error type
+ * Error in the data[2] field of struct can_frame.
+ * @{ */
+/* error in CAN protocol (type) / data[2] */
+#define CAN_ERR_PROT_UNSPEC	0x00 /**< unspecified */
+#define CAN_ERR_PROT_BIT	0x01 /**< single bit error */
+#define CAN_ERR_PROT_FORM	0x02 /**< frame format error */
+#define CAN_ERR_PROT_STUFF	0x04 /**< bit stuffing error */
+#define CAN_ERR_PROT_BIT0	0x08 /**< unable to send dominant bit */
+#define CAN_ERR_PROT_BIT1	0x10 /**< unable to send recessive bit */
+#define CAN_ERR_PROT_OVERLOAD	0x20 /**< bus overload */
+#define CAN_ERR_PROT_ACTIVE	0x40 /**< active error announcement */
+#define CAN_ERR_PROT_TX		0x80 /**< error occured on transmission */
+/** @} */
+
+/*!
+ * @anchor Error3 @name Protocol error location
+ * Error in the data[3] field of struct can_frame.
+ * @{ */
+/* error in CAN protocol (location) / data[3] */
+#define CAN_ERR_PROT_LOC_UNSPEC	 0x00 /**< unspecified */
+#define CAN_ERR_PROT_LOC_SOF	 0x03 /**< start of frame */
+#define CAN_ERR_PROT_LOC_ID28_21 0x02 /**< ID bits 28 - 21 (SFF: 10 - 3) */
+#define CAN_ERR_PROT_LOC_ID20_18 0x06 /**< ID bits 20 - 18 (SFF: 2 - 0) */
+#define CAN_ERR_PROT_LOC_SRTR	 0x04 /**< substitute RTR (SFF: RTR) */
+#define CAN_ERR_PROT_LOC_IDE	 0x05 /**< identifier extension */
+#define CAN_ERR_PROT_LOC_ID17_13 0x07 /**< ID bits 17-13 */
+#define CAN_ERR_PROT_LOC_ID12_05 0x0F /**< ID bits 12-5 */
+#define CAN_ERR_PROT_LOC_ID04_00 0x0E /**< ID bits 4-0 */
+#define CAN_ERR_PROT_LOC_RTR	 0x0C /**< RTR */
+#define CAN_ERR_PROT_LOC_RES1	 0x0D /**< reserved bit 1 */
+#define CAN_ERR_PROT_LOC_RES0	 0x09 /**< reserved bit 0 */
+#define CAN_ERR_PROT_LOC_DLC	 0x0B /**< data length code */
+#define CAN_ERR_PROT_LOC_DATA	 0x0A /**< data section */
+#define CAN_ERR_PROT_LOC_CRC_SEQ 0x08 /**< CRC sequence */
+#define CAN_ERR_PROT_LOC_CRC_DEL 0x18 /**< CRC delimiter */
+#define CAN_ERR_PROT_LOC_ACK	 0x19 /**< ACK slot */
+#define CAN_ERR_PROT_LOC_ACK_DEL 0x1B /**< ACK delimiter */
+#define CAN_ERR_PROT_LOC_EOF	 0x1A /**< end of frame */
+#define CAN_ERR_PROT_LOC_INTERM	 0x12 /**< intermission */
+/** @} */
+
+/*!
+ * @anchor Error4 @name Transceiver status
+ * Error in the data[4] field of struct can_frame.
+ * @{ */
+/* error status of CAN-transceiver / data[4] */
+/*                                               CANH CANL */
+#define CAN_ERR_TRX_UNSPEC		0x00 /**< 0000 0000 */
+#define CAN_ERR_TRX_CANH_NO_WIRE	0x04 /**< 0000 0100 */
+#define CAN_ERR_TRX_CANH_SHORT_TO_BAT	0x05 /**< 0000 0101 */
+#define CAN_ERR_TRX_CANH_SHORT_TO_VCC	0x06 /**< 0000 0110 */
+#define CAN_ERR_TRX_CANH_SHORT_TO_GND	0x07 /**< 0000 0111 */
+#define CAN_ERR_TRX_CANL_NO_WIRE	0x40 /**< 0100 0000 */
+#define CAN_ERR_TRX_CANL_SHORT_TO_BAT	0x50 /**< 0101 0000 */
+#define CAN_ERR_TRX_CANL_SHORT_TO_VCC	0x60 /**< 0110 0000 */
+#define CAN_ERR_TRX_CANL_SHORT_TO_GND	0x70 /**< 0111 0000 */
+#define CAN_ERR_TRX_CANL_SHORT_TO_CANH	0x80 /**< 1000 0000 */
+/** @} */
+
+/** @} */
+
+#endif /* !_RTDM_UAPI_CAN_H */
+++ linux-patched/include/xenomai/rtdm/uapi/net.h	2022-03-21 12:58:32.232861238 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/include/xenomai/rtdm/uapi/spi.h	1970-01-01 01:00:00.000000000 +0100
+/***
+ *
+ *  RTnet - real-time networking subsystem
+ *  Copyright (C) 2005-2011 Jan Kiszka <jan.kiszka@web.de>
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; either version 2 of the License, or
+ *  (at your option) any later version.
+ *
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License
+ *  along with this program; if not, write to the Free Software
+ *  Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ *  As a special exception to the GNU General Public license, the RTnet
+ *  project allows you to use this header file in unmodified form to produce
+ *  application programs executing in user-space which use RTnet services by
+ *  normal system calls. The resulting executable will not be covered by the
+ *  GNU General Public License merely as a result of this header file use.
+ *  Instead, this header file use will be considered normal use of RTnet and
+ *  not a "derived work" in the sense of the GNU General Public License.
+ *
+ *  This exception does not apply when the application code is built as a
+ *  static or dynamically loadable portion of the Linux kernel nor does the
+ *  exception override other reasons justifying application of the GNU General
+ *  Public License.
+ *
+ *  This exception applies only to the code released by the RTnet project
+ *  under the name RTnet and bearing this exception notice. If you copy code
+ *  from other sources into a copy of RTnet, the exception does not apply to
+ *  the code that you add in this way.
+ *
+ */
+
+#ifndef _RTDM_UAPI_NET_H
+#define _RTDM_UAPI_NET_H
+
+/* sub-classes: RTDM_CLASS_NETWORK */
+#define RTDM_SUBCLASS_RTNET     0
+
+#define RTIOC_TYPE_NETWORK      RTDM_CLASS_NETWORK
+
+/* RTnet-specific IOCTLs */
+#define RTNET_RTIOC_XMITPARAMS  _IOW(RTIOC_TYPE_NETWORK, 0x10, unsigned int)
+#define RTNET_RTIOC_PRIORITY    RTNET_RTIOC_XMITPARAMS  /* legacy */
+#define RTNET_RTIOC_TIMEOUT     _IOW(RTIOC_TYPE_NETWORK, 0x11, int64_t)
+/* RTNET_RTIOC_CALLBACK         _IOW(RTIOC_TYPE_NETWORK, 0x12, ...
+ * IOCTL only usable inside the kernel. */
+/* RTNET_RTIOC_NONBLOCK         _IOW(RTIOC_TYPE_NETWORK, 0x13, unsigned int)
+ * This IOCTL is no longer supported (and it was buggy anyway).
+ * Use RTNET_RTIOC_TIMEOUT with any negative timeout value instead. */
+#define RTNET_RTIOC_EXTPOOL     _IOW(RTIOC_TYPE_NETWORK, 0x14, unsigned int)
+#define RTNET_RTIOC_SHRPOOL     _IOW(RTIOC_TYPE_NETWORK, 0x15, unsigned int)
+
+/* socket transmission priorities */
+#define SOCK_MAX_PRIO           0
+#define SOCK_DEF_PRIO           SOCK_MAX_PRIO + \
+				    (SOCK_MIN_PRIO-SOCK_MAX_PRIO+1)/2
+#define SOCK_MIN_PRIO           SOCK_NRT_PRIO - 1
+#define SOCK_NRT_PRIO           31
+
+/* socket transmission channels */
+#define SOCK_DEF_RT_CHANNEL     0           /* default rt xmit channel     */
+#define SOCK_DEF_NRT_CHANNEL    1           /* default non-rt xmit channel */
+#define SOCK_USER_CHANNEL       2           /* first user-defined channel  */
+
+/* argument construction for RTNET_RTIOC_XMITPARAMS */
+#define SOCK_XMIT_PARAMS(priority, channel) ((priority) | ((channel) << 16))
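+
+/*
+ * Illustrative sketch only: it assumes that the RTNET_RTIOC_XMITPARAMS
+ * argument is passed by reference like the other socket IOCTLs above,
+ * and that POSIX symbol wrapping routes ioctl() to RTDM.
+ *
+ *	unsigned int xprm = SOCK_XMIT_PARAMS(SOCK_MAX_PRIO, SOCK_DEF_RT_CHANNEL);
+ *
+ *	ioctl(s, RTNET_RTIOC_XMITPARAMS, &xprm);
+ */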
+
+#endif  /* !_RTDM_UAPI_NET_H */
+++ linux-patched/include/xenomai/rtdm/uapi/spi.h	2022-03-21 12:58:32.225861306 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/include/xenomai/rtdm/uapi/autotune.h	1970-01-01 01:00:00.000000000 +0100
+/**
+ * @note Copyright (C) 2016 Philippe Gerum <rpm@xenomai.org>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+#ifndef _RTDM_UAPI_SPI_H
+#define _RTDM_UAPI_SPI_H
+
+#include <linux/types.h>
+
+struct rtdm_spi_config {
+	__u32 speed_hz;
+	__u16 mode;
+	__u8 bits_per_word;
+};
+
+struct rtdm_spi_iobufs {
+	__u32 io_len;
+	__u32 i_offset;
+	__u32 o_offset;
+	__u32 map_len;
+};
+
+#define SPI_RTIOC_SET_CONFIG		_IOW(RTDM_CLASS_SPI, 0, struct rtdm_spi_config)
+#define SPI_RTIOC_GET_CONFIG		_IOR(RTDM_CLASS_SPI, 1, struct rtdm_spi_config)
+#define SPI_RTIOC_SET_IOBUFS		_IOR(RTDM_CLASS_SPI, 2, struct rtdm_spi_iobufs)
+#define SPI_RTIOC_TRANSFER		_IO(RTDM_CLASS_SPI, 3)
+#define SPI_RTIOC_TRANSFER_N		_IOR(RTDM_CLASS_SPI, 4, int)
+
+#endif /* !_RTDM_UAPI_SPI_H */
+++ linux-patched/include/xenomai/rtdm/uapi/autotune.h	2022-03-21 12:58:32.217861384 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/include/xenomai/rtdm/ipc.h	1970-01-01 01:00:00.000000000 +0100
+/*
+ * This file is part of the Xenomai project.
+ *
+ * Copyright (C) 2014 Philippe Gerum <rpm@xenomai.org>
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA.
+ */
+#ifndef _RTDM_UAPI_AUTOTUNE_H
+#define _RTDM_UAPI_AUTOTUNE_H
+
+#include <linux/types.h>
+
+#define RTDM_CLASS_AUTOTUNE		RTDM_CLASS_MISC
+#define RTDM_SUBCLASS_AUTOTUNE		0
+
+struct autotune_setup {
+	__u32 period;
+	__u32 quiet;
+};
+
+#define AUTOTUNE_RTIOC_IRQ		_IOW(RTDM_CLASS_AUTOTUNE, 0, struct autotune_setup)
+#define AUTOTUNE_RTIOC_KERN		_IOW(RTDM_CLASS_AUTOTUNE, 1, struct autotune_setup)
+#define AUTOTUNE_RTIOC_USER		_IOW(RTDM_CLASS_AUTOTUNE, 2, struct autotune_setup)
+#define AUTOTUNE_RTIOC_PULSE		_IOW(RTDM_CLASS_AUTOTUNE, 3, __u64)
+#define AUTOTUNE_RTIOC_RUN		_IOR(RTDM_CLASS_AUTOTUNE, 4, __u32)
+#define AUTOTUNE_RTIOC_RESET		_IO(RTDM_CLASS_AUTOTUNE, 5)
+
+#endif /* !_RTDM_UAPI_AUTOTUNE_H */
+++ linux-patched/include/xenomai/rtdm/ipc.h	2022-03-21 12:58:31.927864212 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/include/xenomai/rtdm/udd.h	1970-01-01 01:00:00.000000000 +0100
+/*
+ * This file is part of the Xenomai project.
+ *
+ * Copyright (C) 2009 Philippe Gerum <rpm@xenomai.org>
+ *
+ * Xenomai is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+#ifndef _COBALT_RTDM_IPC_H
+#define _COBALT_RTDM_IPC_H
+
+#include <linux/net.h>
+#include <linux/socket.h>
+#include <linux/if.h>
+#include <rtdm/rtdm.h>
+#include <rtdm/uapi/ipc.h>
+
+#endif /* !_COBALT_RTDM_IPC_H */
+++ linux-patched/include/xenomai/rtdm/udd.h	2022-03-21 12:58:31.920864280 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/include/xenomai/rtdm/testing.h	1970-01-01 01:00:00.000000000 +0100
+/**
+ * @file
+ * Copyright (C) 2014 Philippe Gerum <rpm@xenomai.org>
+ *
+ * Xenomai is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+#ifndef _COBALT_RTDM_UDD_H
+#define _COBALT_RTDM_UDD_H
+
+#include <linux/list.h>
+#include <rtdm/driver.h>
+#include <rtdm/uapi/udd.h>
+
+/**
+ * @ingroup rtdm_profiles
+ * @defgroup rtdm_udd User-space driver core
+ *
+ * This profile includes all mini-drivers sitting on top of the
+ * User-space Device Driver framework (UDD). The generic UDD core
+ * driver enables interrupt control and I/O memory access interfaces
+ * to user-space device drivers, as defined by the mini-drivers when
+ * registering.
+ *
+ * A mini-driver supplements the UDD core with ancillary functions for
+ * dealing with @ref udd_memory_region "memory mappings" and @ref
+ * udd_irq_handler "interrupt control" for a particular I/O
+ * card/device.
+ *
+ * UDD-compliant mini-drivers only have to provide the basic support
+ * for dealing with the interrupt sources present in the device, so
+ * that most of the device requests can be handled from a Xenomai
+ * application running in user-space. Typically, a mini-driver would
+ * handle the interrupt top-half, and the user-space application would
+ * handle the bottom-half.
+ *
+ * This profile is reminiscent of the UIO framework available with the
+ * Linux kernel, adapted to the dual kernel Cobalt environment.
+ *
+ * @{
+ */
+
+/**
+ * @anchor udd_irq_special
+ * Special IRQ values for udd_device.irq
+ *
+ * @{
+ */
+/**
+ * No IRQ managed. Passing this code implicitly disables all
+ * interrupt-related services, including control (disable/enable) and
+ * notification.
+ */
+#define UDD_IRQ_NONE     0
+/**
+ * IRQ directly managed from the mini-driver on top of the UDD
+ * core. The mini-driver is in charge of attaching the handler(s) to
+ * the IRQ(s) it manages, notifying the Cobalt threads waiting for IRQ
+ * events by calling the udd_notify_event() service.
+ */
+#define UDD_IRQ_CUSTOM   (-1)
+/** @} */
+
+/**
+ * @anchor udd_memory_types  @name Memory types for mapping
+ * Types of memory for mapping
+ *
+ * The UDD core implements a default ->mmap() handler which first
+ * attempts to hand over the request to the corresponding handler
+ * defined by the mini-driver. If not present, the UDD core
+ * establishes the mapping automatically, depending on the memory
+ * type defined for the region.
+ *
+ * @{
+ */
+/**
+ * No memory region. Use this type code to disable an entry in the
+ * array of memory mappings, i.e. udd_device.mem_regions[].
+ */
+#define UDD_MEM_NONE     0
+/**
+ * Physical I/O memory region. By default, the UDD core maps such
+ * memory to a virtual user range by calling the rtdm_mmap_iomem()
+ * service.
+ */
+#define UDD_MEM_PHYS     1
+/**
+ * Kernel logical memory region (e.g. kmalloc()). By default, the UDD
+ * core maps such memory to a virtual user range by calling the
+ * rtdm_mmap_kmem() service. */
+#define UDD_MEM_LOGICAL  2
+/**
+ * Virtual memory region with no direct physical mapping
+ * (e.g. vmalloc()). By default, the UDD core maps such memory to a
+ * virtual user range by calling the rtdm_mmap_vmem() service.
+ */
+#define UDD_MEM_VIRTUAL  3
+/** @} */
+
+#define UDD_NR_MAPS  5
+
+/**
+ * @anchor udd_memory_region
+ * UDD memory region descriptor.
+ *
+ * This descriptor defines the characteristics of a memory region
+ * declared to the UDD core by the mini-driver. All valid regions
+ * should be declared in the udd_device.mem_regions[] array,
+ * invalid/unassigned ones should bear the UDD_MEM_NONE type.
+ *
+ * The UDD core exposes each region via the mmap(2) interface to the
+ * application. To this end, a companion mapper device is created
+ * automatically when registering the mini-driver.
+ *
+ * The mapper device creates special files in the RTDM namespace for
+ * reaching the individual regions, which the application can open
+ * then map to its address space via the mmap(2) system call.
+ *
+ * For instance, declaring a region of physical memory at index #2 of
+ * the memory region array could be done as follows:
+ *
+ * @code
+ * static struct udd_device udd;
+ *
+ * static int foocard_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
+ * {
+ *      udd.device_name = "foocard";
+ *      ...
+ *      udd.mem_regions[2].name = "ADC";
+ *      udd.mem_regions[2].addr = pci_resource_start(dev, 1);
+ *      udd.mem_regions[2].len = pci_resource_len(dev, 1);
+ *      udd.mem_regions[2].type = UDD_MEM_PHYS;
+ *      ...
+ *      return udd_register_device(&udd);
+ * }
+ * @endcode
+ *
+ * This will make such region accessible via the mapper device using
+ * the following sequence of code (see note), via the default
+ * ->mmap() handler from the UDD core:
+ *
+ * @code
+ * int fd, fdm;
+ * void *p;
+ *
+ * fd = open("/dev/rtdm/foocard", O_RDWR);
+ * fdm = open("/dev/rtdm/foocard,mapper2", O_RDWR);
+ * p = mmap(NULL, 4096, PROT_READ|PROT_WRITE, MAP_SHARED, fdm, 0);
+ * @endcode
+ *
+ * If no valid region has been declared in the
+ * udd_device.mem_regions[] array, no mapper device is created.
+ *
+ * @note The example code assumes that @ref cobalt_api POSIX symbol
+ * wrapping is in effect, so that RTDM performs the memory mapping
+ * operation (not the regular kernel).
+ */
+struct udd_memregion {
+	/** Name of the region (informational but required) */
+	const char *name;
+	/**
+	 * Start address of the region. This may be a physical or
+	 * virtual address, depending on the @ref udd_memory_types
+	 * "memory type".
+	 */
+	unsigned long addr;
+	/**
+	 * Length (in bytes) of the region. This value must be
+	 * PAGE_SIZE aligned.
+	 */
+	size_t len;
+	/**
+	 * Type of the region. See the discussion about @ref
+	 * udd_memory_types "UDD memory types" for possible values.
+	 */
+	int type;
+};
+
+/**
+ * @anchor udd_device
+ * UDD device descriptor.
+ *
+ * This descriptor defines the characteristics of a UDD-based
+ * mini-driver when registering via a call to udd_register_device().
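+ *
+ * A minimal registration sketch with a core-managed IRQ could look as
+ * follows; the device name, IRQ number and handler body are
+ * illustrative only:
+ *
+ * @code
+ * static int foocard_interrupt(struct udd_device *udd)
+ * {
+ *      // acknowledge the interrupt in the device registers here
+ *      return RTDM_IRQ_HANDLED; // UDD core then notifies waiters
+ * }
+ *
+ * static struct udd_device udd = {
+ *      .device_name = "foocard",
+ *      .irq = 23,
+ *      .ops = {
+ *              .interrupt = foocard_interrupt,
+ *      },
+ * };
+ *
+ * // ... then call udd_register_device(&udd) from the probe routine
+ * @endcode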
+ */
+struct udd_device {
+	/**
+	 * Name of the device managed by the mini-driver, appears
+	 * automatically in the /dev/rtdm namespace upon creation.
+	 */
+	const char *device_name;
+	/**
+	 * Additional device flags (e.g. RTDM_EXCLUSIVE;
+	 * RTDM_NAMED_DEVICE may be omitted).
+	 */
+	int device_flags;
+	/**
+	 * Subclass code of the device managed by the mini-driver (see
+	 * RTDM_SUBCLASS_xxx definition in the @ref rtdm_profiles
+	 * "Device Profiles"). The main class code is pre-set to
+	 * RTDM_CLASS_UDD.
+	 */
+	int device_subclass;
+	struct {
+		/**
+		 * Ancillary open() handler, optional. See
+		 * rtdm_open_handler().
+		 *
+		 * @note This handler is called from secondary mode
+		 * only.
+		 */
+		int (*open)(struct rtdm_fd *fd, int oflags);
+		/**
+		 * Ancillary close() handler, optional. See
+		 * rtdm_close_handler().
+		 *
+		 * @note This handler is called from secondary mode
+		 * only.
+		 */
+		void (*close)(struct rtdm_fd *fd);
+		/**
+		 * Ancillary ioctl() handler, optional. See
+		 * rtdm_ioctl_handler().
+		 *
+		 * If this routine returns -ENOSYS, the default action
+		 * implemented by the UDD core for the corresponding
+		 * request will be applied, as if no ioctl handler had
+		 * been defined.
+		 *
+		 * @note This handler is called from primary mode
+		 * only.
+		 */
+		int (*ioctl)(struct rtdm_fd *fd,
+			     unsigned int request, void *arg);
+		/**
+		 * Ancillary mmap() handler for the mapper device,
+		 * optional. See rtdm_mmap_handler(). The mapper
+		 * device operates on a valid region defined in the @a
+		 * mem_regions[] array. A pointer to the region 
+		 * can be obtained by a call to udd_get_region().
+		 *
+		 * If this handler is NULL, the UDD core establishes
+		 * the mapping automatically, depending on the memory
+		 * type defined for the region.
+		 *
+		 * @note This handler is called from secondary mode
+		 * only.
+		 */
+		int (*mmap)(struct rtdm_fd *fd,
+			    struct vm_area_struct *vma);
+		/**
+		 * @anchor udd_irq_handler
+		 *
+		 * Ancillary handler for receiving interrupts. This
+		 * handler must be provided if the mini-driver hands
+		 * over IRQ handling to the UDD core, by setting the
+		 * @a irq field to a valid value, different from
+		 * UDD_IRQ_CUSTOM and UDD_IRQ_NONE.
+		 *
+		 * The ->interrupt() handler shall return one of the
+		 * following status codes:
+		 *
+		 * - RTDM_IRQ_HANDLED, if the mini-driver successfully
+		 * handled the IRQ. This flag can be combined with
+		 * RTDM_IRQ_DISABLE to prevent the Cobalt kernel from
+		 * re-enabling the interrupt line upon return,
+		 * otherwise it is re-enabled automatically.
+		 *
+		 * - RTDM_IRQ_NONE, if the interrupt does not match
+		 * any IRQ the mini-driver can handle.
+		 *
+		 * Once the ->interrupt() handler has returned, the
+		 * UDD core notifies user-space Cobalt threads waiting
+		 * for IRQ events (if any).
+		 *
+		 * @note This handler is called from primary mode
+		 * only.
+		 */
+		int (*interrupt)(struct udd_device *udd);
+	} ops;
+	/**
+	 * IRQ number. If valid, the UDD core manages the
+	 * corresponding interrupt line, installing a base handler.
+	 * Otherwise, a special value can be passed for declaring
+	 * @ref udd_irq_special "unmanaged IRQs".
+	 */
+	int irq;
+	/**
+	 * Array of memory regions defined by the device. The array
+	 * can be sparse, with some entries bearing the UDD_MEM_NONE
+	 * type interleaved with valid ones.  See the discussion about
+	 * @ref udd_memory_region "UDD memory regions".
+	 */
+	struct udd_memregion mem_regions[UDD_NR_MAPS];
+	/** Reserved to the UDD core. */
+	struct udd_reserved {
+		rtdm_irq_t irqh;
+		u32 event_count;
+		struct udd_signotify signfy;
+		struct rtdm_event pulse;
+		struct rtdm_driver driver;
+		struct rtdm_device device;
+		struct rtdm_driver mapper_driver;
+		struct udd_mapper {
+			struct udd_device *udd;
+			struct rtdm_device dev;
+		} mapdev[UDD_NR_MAPS];
+		char *mapper_name;
+		int nr_maps;
+	} __reserved;
+};
+
+int udd_register_device(struct udd_device *udd);
+
+int udd_unregister_device(struct udd_device *udd);
+
+struct udd_device *udd_get_device(struct rtdm_fd *fd);
+
+void udd_notify_event(struct udd_device *udd);
+
+void udd_enable_irq(struct udd_device *udd,
+		    rtdm_event_t *done);
+
+void udd_disable_irq(struct udd_device *udd,
+		     rtdm_event_t *done);
+
+/** @} */
+
+#endif /* !_COBALT_RTDM_UDD_H */
+++ linux-patched/include/xenomai/rtdm/testing.h	2022-03-21 12:58:31.912864359 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/include/xenomai/rtdm/gpio.h	1970-01-01 01:00:00.000000000 +0100
+/*
+ * Copyright (C) 2005 Jan Kiszka <jan.kiszka@web.de>
+ *
+ * Xenomai is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * Xenomai is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Xenomai; if not, write to the Free Software Foundation,
+ * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+#ifndef _COBALT_RTDM_TESTING_H
+#define _COBALT_RTDM_TESTING_H
+
+#include <rtdm/rtdm.h>
+#include <rtdm/uapi/testing.h>
+
+#ifdef CONFIG_XENO_ARCH_SYS3264
+
+#include <rtdm/compat.h>
+
+struct compat_rttst_overall_bench_res {
+	struct rttst_bench_res result;
+	compat_uptr_t histogram_avg;
+	compat_uptr_t histogram_min;
+	compat_uptr_t histogram_max;
+};
+
+struct compat_rttst_heap_stathdr {
+	int nrstats;
+	compat_uptr_t buf;
+};
+
+#define RTTST_RTIOC_TMBENCH_STOP_COMPAT \
+	_IOWR(RTIOC_TYPE_TESTING, 0x11, struct compat_rttst_overall_bench_res)
+
+#endif	/* CONFIG_XENO_ARCH_SYS3264 */
+
+#endif /* !_COBALT_RTDM_TESTING_H */
+++ linux-patched/include/xenomai/rtdm/gpio.h	2022-03-21 12:58:31.905864427 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/include/xenomai/rtdm/compat.h	1970-01-01 01:00:00.000000000 +0100
+/**
+ * Copyright (C) 2016 Philippe Gerum <rpm@xenomai.org>
+ *
+ * Xenomai is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+#ifndef _COBALT_RTDM_GPIO_H
+#define _COBALT_RTDM_GPIO_H
+
+#include <linux/list.h>
+#include <rtdm/driver.h>
+#include <rtdm/uapi/gpio.h>
+
+struct class;
+struct device_node;
+struct gpio_desc;
+
+struct rtdm_gpio_pin {
+	struct rtdm_device dev;
+	struct list_head next;
+	rtdm_irq_t irqh;
+	rtdm_event_t event;
+	char *name;
+	struct gpio_desc *desc;
+	nanosecs_abs_t timestamp;
+	bool monotonic_timestamp;
+};
+
+struct rtdm_gpio_chip {
+	struct gpio_chip *gc;
+	struct rtdm_driver driver;
+	struct class *devclass;
+	struct list_head next;
+	rtdm_lock_t lock;
+	struct rtdm_gpio_pin pins[0];
+};
+
+int rtdm_gpiochip_add(struct rtdm_gpio_chip *rgc,
+		      struct gpio_chip *gc,
+		      int gpio_subclass);
+
+struct rtdm_gpio_chip *
+rtdm_gpiochip_alloc(struct gpio_chip *gc,
+		    int gpio_subclass);
+
+void rtdm_gpiochip_remove(struct rtdm_gpio_chip *rgc);
+
+int rtdm_gpiochip_add_by_name(struct rtdm_gpio_chip *rgc,
+			      const char *label, int gpio_subclass);
+
+int rtdm_gpiochip_post_event(struct rtdm_gpio_chip *rgc,
+			     unsigned int offset);
+
+int rtdm_gpiochip_find(struct device_node *from, const char *label, int type);
+
+int rtdm_gpiochip_array_find(struct device_node *from, const char *label[],
+			     int nentries, int type);
+
+#ifdef CONFIG_OF
+
+int rtdm_gpiochip_scan_of(struct device_node *from,
+			  const char *compat, int type);
+
+int rtdm_gpiochip_scan_array_of(struct device_node *from,
+				const char *compat[],
+				int nentries, int type);
+#endif
+
+void rtdm_gpiochip_remove_by_type(int type);
+
+#endif /* !_COBALT_RTDM_GPIO_H */
+++ linux-patched/include/xenomai/rtdm/compat.h	2022-03-21 12:58:31.894864534 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/include/xenomai/rtdm/serial.h	1970-01-01 01:00:00.000000000 +0100
+/*
+ * Copyright (C) 2014 Philippe Gerum <rpm@xenomai.org>
+ *
+ * Xenomai is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * Xenomai is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+#ifndef _COBALT_RTDM_COMPAT_H
+#define _COBALT_RTDM_COMPAT_H
+
+#ifdef CONFIG_XENO_ARCH_SYS3264
+
+#include <cobalt/kernel/compat.h>
+#include <rtdm/rtdm.h>
+
+struct compat_rtdm_getsockopt_args {
+	int level;
+	int optname;
+	compat_uptr_t optval;
+	compat_uptr_t optlen;
+};
+
+struct compat_rtdm_setsockopt_args {
+	int level;
+	int optname;
+	const compat_uptr_t optval;
+	socklen_t optlen;
+};
+
+struct compat_rtdm_getsockaddr_args {
+	compat_uptr_t addr;
+	compat_uptr_t addrlen;
+};
+
+struct compat_rtdm_setsockaddr_args {
+	const compat_uptr_t addr;
+	socklen_t addrlen;
+};
+
+#define _RTIOC_GETSOCKOPT_COMPAT	_IOW(RTIOC_TYPE_COMMON, 0x20,	\
+					     struct compat_rtdm_getsockopt_args)
+#define _RTIOC_SETSOCKOPT_COMPAT	_IOW(RTIOC_TYPE_COMMON, 0x21,	\
+					     struct compat_rtdm_setsockopt_args)
+#define _RTIOC_BIND_COMPAT		_IOW(RTIOC_TYPE_COMMON, 0x22,	\
+					     struct compat_rtdm_setsockaddr_args)
+#define _RTIOC_CONNECT_COMPAT		_IOW(RTIOC_TYPE_COMMON, 0x23,	\
+					     struct compat_rtdm_setsockaddr_args)
+#define _RTIOC_ACCEPT_COMPAT		_IOW(RTIOC_TYPE_COMMON, 0x25,	\
+					     struct compat_rtdm_getsockaddr_args)
+#define _RTIOC_GETSOCKNAME_COMPAT	_IOW(RTIOC_TYPE_COMMON, 0x26,	\
+					     struct compat_rtdm_getsockaddr_args)
+#define _RTIOC_GETPEERNAME_COMPAT	_IOW(RTIOC_TYPE_COMMON, 0x27,	\
+					     struct compat_rtdm_getsockaddr_args)
+
+#define __COMPAT_CASE(__op)		: case __op
+
+#else	/* !CONFIG_XENO_ARCH_SYS3264 */
+
+#define __COMPAT_CASE(__op)
+
+#endif	/* !CONFIG_XENO_ARCH_SYS3264 */
+
+#define COMPAT_CASE(__op)	case __op __COMPAT_CASE(__op  ## _COMPAT)
+
+#endif /* !_COBALT_RTDM_COMPAT_H */
+++ linux-patched/include/xenomai/rtdm/serial.h	2022-03-21 12:58:31.887864602 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/include/xenomai/rtdm/driver.h	1970-01-01 01:00:00.000000000 +0100
+/*
+ * Copyright (C) 2005-2007 Jan Kiszka <jan.kiszka@web.de>
+ *
+ * Xenomai is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * Xenomai is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Xenomai; if not, write to the Free Software Foundation,
+ * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+#ifndef _COBALT_RTDM_SERIAL_H
+#define _COBALT_RTDM_SERIAL_H
+
+#include <rtdm/rtdm.h>
+#include <rtdm/uapi/serial.h>
+
+#endif /* !_COBALT_RTDM_SERIAL_H */
+++ linux-patched/include/xenomai/rtdm/driver.h	2022-03-21 12:58:31.879864680 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/include/xenomai/rtdm/rtdm.h	1970-01-01 01:00:00.000000000 +0100
+/**
+ * @file
+ * Real-Time Driver Model for Xenomai, driver API header
+ *
+ * Copyright (C) 2005-2007 Jan Kiszka <jan.kiszka@web.de>
+ * Copyright (C) 2005 Joerg Langenberg <joerg.langenberg@gmx.net>
+ * Copyright (C) 2008 Gilles Chanteperdrix <gilles.chanteperdrix@xenomai.org>
+ * Copyright (C) 2014 Philippe Gerum <rpm@xenomai.org>
+ *
+ * Xenomai is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * Xenomai is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Xenomai; if not, write to the Free Software Foundation,
+ * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ * @ingroup driverapi
+ */
+#ifndef _COBALT_RTDM_DRIVER_H
+#define _COBALT_RTDM_DRIVER_H
+
+#include <asm/atomic.h>
+#include <linux/cpumask.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/cdev.h>
+#include <linux/wait.h>
+#include <linux/notifier.h>
+#include <pipeline/lock.h>
+#include <pipeline/inband_work.h>
+#include <xenomai/version.h>
+#include <cobalt/kernel/heap.h>
+#include <cobalt/kernel/sched.h>
+#include <cobalt/kernel/intr.h>
+#include <cobalt/kernel/synch.h>
+#include <cobalt/kernel/select.h>
+#include <cobalt/kernel/clock.h>
+#include <cobalt/kernel/init.h>
+#include <cobalt/kernel/ancillaries.h>
+#include <cobalt/kernel/tree.h>
+#include <rtdm/fd.h>
+#include <rtdm/rtdm.h>
+
+/* debug support */
+#include <cobalt/kernel/assert.h>
+#include <trace/events/cobalt-rtdm.h>
+#ifdef CONFIG_PCI
+#include <asm-generic/xenomai/pci_ids.h>
+#endif /* CONFIG_PCI */
+#include <asm/xenomai/syscall.h>
+
+struct class;
+typedef struct xnselector rtdm_selector_t;
+enum rtdm_selecttype;
+
+/*!
+ * @addtogroup rtdm_device_register
+ * @{
+ */
+
+/*!
+ * @anchor dev_flags @name Device Flags
+ * Static flags describing a RTDM device
+ * @{
+ */
+/** If set, only a single instance of the device can be requested by an
+ *  application. */
+#define RTDM_EXCLUSIVE			0x0001
+
+/**
+ * Use fixed minor provided in the rtdm_device description for
+ * registering. If this flag is absent, the RTDM core assigns minor
+ * numbers to devices managed by a driver in order of registration.
+ */
+#define RTDM_FIXED_MINOR		0x0002
+
+/** If set, the device is addressed via a clear-text name. */
+#define RTDM_NAMED_DEVICE		0x0010
+
+/** If set, the device is addressed via a combination of protocol ID and
+ *  socket type. */
+#define RTDM_PROTOCOL_DEVICE		0x0020
+
+/** Mask selecting the device type. */
+#define RTDM_DEVICE_TYPE_MASK		0x00F0
+
+/** Flag indicating a secure variant of RTDM (not supported here) */
+#define RTDM_SECURE_DEVICE		0x80000000
+/** @} Device Flags */
+
+/** Maximum number of named devices per driver. */
+#define RTDM_MAX_MINOR	4096
+
+/** @} rtdm_device_register */
+
+/*!
+ * @addtogroup rtdm_sync
+ * @{
+ */
+
+/*!
+ * @anchor RTDM_SELECTTYPE_xxx   @name RTDM_SELECTTYPE_xxx
+ * Event types select can bind to
+ * @{
+ */
+enum rtdm_selecttype {
+	/** Select input data availability events */
+	RTDM_SELECTTYPE_READ = XNSELECT_READ,
+
+	/** Select output buffer availability events */
+	RTDM_SELECTTYPE_WRITE = XNSELECT_WRITE,
+
+	/** Select exceptional events */
+	RTDM_SELECTTYPE_EXCEPT = XNSELECT_EXCEPT
+};
+/** @} RTDM_SELECTTYPE_xxx */
+
+/** @} rtdm_sync */
+
+/**
+ * @brief Device context
+ *
+ * A device context structure is associated with every open device instance.
+ * RTDM takes care of its creation and destruction and passes it to the
+ * operation handlers when being invoked.
+ *
+ * Drivers can attach arbitrary data immediately after the official
+ * structure.  The size of this data is provided via
+ * rtdm_driver.context_size during device registration.
+ */
+struct rtdm_dev_context {
+	struct rtdm_fd fd;
+
+	/** Reference to owning device */
+	struct rtdm_device *device;
+
+	/** Begin of driver defined context data structure */
+	char dev_private[0];
+};
+
+static inline struct rtdm_dev_context *rtdm_fd_to_context(struct rtdm_fd *fd)
+{
+	return container_of(fd, struct rtdm_dev_context, fd);
+}
+
+/**
+ * Locate the driver private area associated to a device context structure
+ *
+ * @param[in] fd File descriptor structure associated with opened
+ * device instance
+ *
+ * @return The address of the private driver area associated to
+ * @a fd.
+ */
+static inline void *rtdm_fd_to_private(struct rtdm_fd *fd)
+{
+	return &rtdm_fd_to_context(fd)->dev_private[0];
+}
+
+/**
+ * Locate a device file descriptor structure from its driver private area
+ *
+ * @param[in] dev_private Address of a private context area
+ *
+ * @return The address of the file descriptor structure defining @a
+ * dev_private.
+ */
+static inline struct rtdm_fd *rtdm_private_to_fd(void *dev_private)
+{
+	struct rtdm_dev_context *ctx;
+	ctx = container_of(dev_private, struct rtdm_dev_context, dev_private);
+	return &ctx->fd;
+}
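+
+/*
+ * Round-trip sketch between a file descriptor and the driver-defined
+ * context area; struct foo_context and foo_open() are illustrative
+ * names, the open handler signature follows the rtdm_fd_ops convention:
+ *
+ *	static int foo_open(struct rtdm_fd *fd, int oflags)
+ *	{
+ *		struct foo_context *ctx = rtdm_fd_to_private(fd);
+ *
+ *		// ... initialize ctx; rtdm_private_to_fd(ctx) yields fd back
+ *		return 0;
+ *	}
+ */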
+
+/**
+ * Tell whether the passed file descriptor belongs to an application.
+ *
+ * @param[in] fd File descriptor
+ *
+ * @return true if passed file descriptor belongs to an application,
+ * false otherwise.
+ */
+static inline bool rtdm_fd_is_user(struct rtdm_fd *fd)
+{
+	return rtdm_fd_owner(fd) != &cobalt_kernel_ppd;
+}
+
+/**
+ * Locate a device structure from a file descriptor.
+ *
+ * @param[in] fd File descriptor
+ *
+ * @return The address of the device structure to which this file
+ * descriptor is attached.
+ */
+static inline struct rtdm_device *rtdm_fd_device(struct rtdm_fd *fd)
+{
+	return rtdm_fd_to_context(fd)->device;
+}
+
+/**
+ * @brief RTDM profile information
+ *
+ * This descriptor details the profile information associated to a
+ * RTDM class of device managed by a driver.
+ *
+ * @anchor rtdm_profile_info
+ */
+struct rtdm_profile_info {
+	/** Device class name */
+	const char *name;
+	/** Device class ID, see @ref RTDM_CLASS_xxx */
+	int class_id;
+	/** Device sub-class, see RTDM_SUBCLASS_xxx definition in the
+	    @ref rtdm_profiles "Device Profiles" */
+	int subclass_id;
+	/** Supported device profile version */
+	int version;
+	/** Reserved */
+	unsigned int magic;
+	struct module *owner;
+	struct class *kdev_class;
+};
+
+struct rtdm_driver;
+
+/**
+ * @brief RTDM state management handlers
+ */
+struct rtdm_sm_ops {
+	/** Handler called upon transition to COBALT_STATE_WARMUP */ 
+	int (*start)(struct rtdm_driver *drv);
+	/** Handler called upon transition to COBALT_STATE_TEARDOWN */ 
+	int (*stop)(struct rtdm_driver *drv);
+};
+
+/**
+ * @brief RTDM driver
+ *
+ * This descriptor describes a RTDM device driver. The structure holds
+ * runtime data, therefore it must reside in writable memory.
+ */
+struct rtdm_driver {
+	/**
+	 * Class profile information. The RTDM_PROFILE_INFO() macro @b
+	 * must be used for filling up this field.
+	 * @anchor rtdm_driver_profile
+	 */
+	struct rtdm_profile_info profile_info;
+	/**
+	 * Device flags, see @ref dev_flags "Device Flags" for details
+	 * @anchor rtdm_driver_flags
+	 */
+	int device_flags;
+	/**
+	 * Size of the private memory area the core should
+	 * automatically allocate for each open file descriptor, which
+	 * is usable for storing the context data associated to each
+	 * connection. The allocated memory is zero-initialized. The
+	 * start of this area can be retrieved by a call to
+	 * rtdm_fd_to_private().
+	 */
+	size_t context_size;
+	/** Protocol device identification: protocol family (PF_xxx) */
+	int protocol_family;
+	/** Protocol device identification: socket type (SOCK_xxx) */
+	int socket_type;
+	/** I/O operation handlers */
+	struct rtdm_fd_ops ops;
+	/** State management handlers */
+	struct rtdm_sm_ops smops;
+	/**
+	 * Count of devices this driver manages. This value is used to
+	 * allocate a chrdev region for named devices.
+	 */
+	int device_count;
+	/** Base minor for named devices. */
+	int base_minor;
+	/** Reserved area */
+	struct {
+		union {
+			struct {
+				struct cdev cdev;
+				int major;
+			} named;
+		};
+		atomic_t refcount;
+		struct notifier_block nb_statechange;
+		DECLARE_BITMAP(minor_map, RTDM_MAX_MINOR);
+	};
+};
+
+#define RTDM_CLASS_MAGIC	0x8284636c
+
+/**
+ * @brief Initializer for class profile information.
+ *
+ * This macro must be used to fill in the @ref rtdm_profile_info
+ * "class profile information" field from a RTDM driver.
+ *
+ * @param __name Class name (unquoted).
+ *
+ * @param __id Class major identification number
+ * (profile_version.class_id).
+ *
+ * @param __subid Class minor identification number
+ * (profile_version.subclass_id).
+ *
+ * @param __version Profile version number.
+ *
+ * @note See @ref rtdm_profiles "Device Profiles".
+ */
+#define RTDM_PROFILE_INFO(__name, __id, __subid, __version)	\
+{								\
+	.name = ( # __name ),					\
+	.class_id = (__id),					\
+	.subclass_id = (__subid),				\
+	.version = (__version),					\
+	.magic = ~RTDM_CLASS_MAGIC,				\
+	.owner = THIS_MODULE,					\
+	.kdev_class = NULL,					\
+}
+
+int rtdm_drv_set_sysclass(struct rtdm_driver *drv, struct class *cls);
+
+/**
+ * @brief RTDM device
+ *
+ * This descriptor describes a RTDM device instance. The structure
+ * holds runtime data, therefore it must reside in writable memory.
+ */
+struct rtdm_device {
+	/** Device driver. */
+	struct rtdm_driver *driver;
+	/** Driver definable device data */
+	void *device_data;
+	/**
+	 * Device label template for composing the device name. A
+	 * limited printf-like format string is assumed, with a
+	 * provision for replacing the first %d/%i placeholder found
+	 * in the string by the device minor number.  It is up to the
+	 * driver to actually mention this placeholder or not,
+	 * depending on the naming convention for its devices.  For
+	 * named devices, the corresponding device node will
+	 * automatically appear in the /dev/rtdm hierarchy with
+	 * hotplug-enabled device filesystems (DEVTMPFS).
+	 */
+	const char *label;
+	/**
+	 * Minor number of the device. If RTDM_FIXED_MINOR is present
+	 * in the driver flags, the value stored in this field is used
+	 * verbatim by rtdm_dev_register(). Otherwise, the RTDM core
+	 * automatically assigns minor numbers to all devices managed
+	 * by the driver referred to by @a driver, in order of
+	 * registration, storing the resulting values into this field.
+	 *
+	 * Device nodes created for named devices in the Linux /dev
+	 * hierarchy are assigned this minor number.
+	 *
+	 * The minor number of the current device handling an I/O
+	 * request can be retrieved by a call to rtdm_fd_minor().
+	 */
+	int minor;
+	/** Reserved area. */
+	struct {
+		unsigned int magic;
+		char *name;
+		union {
+			struct {
+				xnhandle_t handle;
+			} named;
+			struct {
+				struct xnid id;
+			} proto;
+		};
+		dev_t rdev;
+		struct device *kdev;
+		struct class *kdev_class;
+		atomic_t refcount;
+		struct rtdm_fd_ops ops;
+		wait_queue_head_t putwq;
+		struct list_head openfd_list;
+	};
+};
+
+/* --- device registration --- */
+
+int rtdm_dev_register(struct rtdm_device *device);
+
+void rtdm_dev_unregister(struct rtdm_device *device);
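+
+/*
+ * Putting the pieces together, a minimal named-device driver skeleton
+ * could look like this. This is a sketch only: the rtdm_fd_ops member
+ * names used below (open, close, ioctl_rt) and RTDM_CLASS_EXPERIMENTAL
+ * should be checked against <rtdm/fd.h> and <rtdm/uapi/rtdm.h>.
+ *
+ *	static struct rtdm_driver foo_driver = {
+ *		.profile_info = RTDM_PROFILE_INFO(foo, RTDM_CLASS_EXPERIMENTAL,
+ *						  0, 1),
+ *		.device_flags = RTDM_NAMED_DEVICE,
+ *		.device_count = 1,
+ *		.context_size = sizeof(struct foo_context),
+ *		.ops = {
+ *			.open = foo_open,
+ *			.close = foo_close,
+ *			.ioctl_rt = foo_ioctl,
+ *		},
+ *	};
+ *
+ *	static struct rtdm_device foo_device = {
+ *		.driver = &foo_driver,
+ *		.label = "foo%d",
+ *	};
+ *
+ *	ret = rtdm_dev_register(&foo_device);
+ */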
+
+#ifndef DOXYGEN_CPP /* Avoid static inline tags for RTDM in doxygen */
+
+static inline struct device *rtdm_dev_to_kdev(struct rtdm_device *device)
+{
+	return device->kdev;
+}
+
+/* --- clock services --- */
+static inline nanosecs_abs_t rtdm_clock_read(void)
+{
+	return xnclock_read_realtime(&nkclock);
+}
+
+static inline nanosecs_abs_t rtdm_clock_read_monotonic(void)
+{
+	return xnclock_read_monotonic(&nkclock);
+}
+#endif /* !DOXYGEN_CPP */
+
+/* --- timeout sequences */
+
+typedef nanosecs_abs_t rtdm_toseq_t;
+
+void rtdm_toseq_init(rtdm_toseq_t *timeout_seq, nanosecs_rel_t timeout);
+
+/*!
+ * @addtogroup rtdm_sync
+ * @{
+ */
+
+/*!
+ * @defgroup rtdm_sync_biglock Big dual kernel lock
+ * @{
+ */
+
+/**
+ * @brief Enter atomic section (dual kernel only)
+ *
+ * This call opens a fully atomic section, serializing execution with
+ * respect to all interrupt handlers (including for real-time IRQs)
+ * and Xenomai threads running on all CPUs.
+ *
+ * @param __context name of the local variable to store the context
+ * in. This variable, updated by the real-time core, holds the
+ * information required to leave the atomic section properly.
+ *
+ * @note Atomic sections may be nested. The caller is allowed to sleep
+ * on a blocking Xenomai service from primary mode within an atomic
+ * section delimited by cobalt_atomic_enter/cobalt_atomic_leave calls.
+ * By contrast, sleeping on a regular Linux kernel service while
+ * holding this lock is NOT valid.
+ *
+ * @note Since the strongest lock is acquired by this service, it can
+ * be used to synchronize real-time and non-real-time contexts.
+ *
+ * @warning This service is not portable to the Mercury core, and
+ * should be restricted to Cobalt-specific use cases, mainly for the
+ * purpose of porting existing dual-kernel drivers which still depend
+ * on the obsolete RTDM_EXECUTE_ATOMICALLY() construct.
+ */
+#define cobalt_atomic_enter(__context)				\
+	do {							\
+		xnlock_get_irqsave(&nklock, (__context));	\
+		xnsched_lock();					\
+	} while (0)
+
+/**
+ * @brief Leave atomic section (dual kernel only)
+ *
+ * This call closes an atomic section previously opened by a call to
+ * cobalt_atomic_enter(), restoring the preemption and interrupt state
+ * which prevailed prior to entering the exited section.
+ *
+ * @param __context name of local variable which stored the context.
+ *
+ * @warning This service is not portable to the Mercury core, and
+ * should be restricted to Cobalt-specific use cases.
+ */
+#define cobalt_atomic_leave(__context)				\
+	do {							\
+		xnsched_unlock();				\
+		xnlock_put_irqrestore(&nklock, (__context));	\
+	} while (0)
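+
+/*
+ * Usage sketch (illustrative only): a typical enter/leave pairing around
+ * a hypothetical driver-local update. The context variable must be of
+ * type spl_t, as expected by the underlying nklock helpers.
+ *
+ *	spl_t s;
+ *
+ *	cobalt_atomic_enter(s);
+ *	update_shared_driver_state();	// hypothetical helper
+ *	cobalt_atomic_leave(s);
+ */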
+
+/**
+ * @brief Execute code block atomically (DEPRECATED)
+ *
+ * Generally, it is illegal to suspend the current task by calling
+ * rtdm_task_sleep(), rtdm_event_wait(), etc. while holding a spinlock. In
+ * contrast, this macro allows combining several operations, including
+ * a potentially rescheduling call, into a code block which is atomic with
+ * respect to other RTDM_EXECUTE_ATOMICALLY() blocks. The macro is a
+ * light-weight alternative to protecting code blocks with mutexes, and it
+ * can even be used to synchronize real-time and non-real-time contexts.
+ *
+ * @param code_block Commands to be executed atomically
+ *
+ * @note It is not allowed to leave the code block explicitly by using
+ * @c break, @c return, @c goto, etc. This would leave the global lock held
+ * during the code block execution in an inconsistent state. Moreover, do not
+ * embed complex operations into the code block. Consider that they will be
+ * executed under the preemption lock with interrupts switched off. Also note that
+ * invocation of rescheduling calls may break the atomicity until the task
+ * gains the CPU again.
+ *
+ * @coretags{unrestricted}
+ *
+ * @deprecated This construct will be phased out in Xenomai
+ * 3.0. Please use rtdm_waitqueue services instead.
+ *
+ * @see cobalt_atomic_enter().
+ */
+#ifdef DOXYGEN_CPP /* Beautify doxygen output */
+#define RTDM_EXECUTE_ATOMICALLY(code_block)	\
+{						\
+	<ENTER_ATOMIC_SECTION>			\
+	code_block;				\
+	<LEAVE_ATOMIC_SECTION>			\
+}
+#else /* This is how it really works */
+static inline __attribute__((deprecated)) void
+rtdm_execute_atomically(void) { }
+
+#define RTDM_EXECUTE_ATOMICALLY(code_block)		\
+{							\
+	spl_t __rtdm_s;					\
+							\
+	rtdm_execute_atomically();			\
+	xnlock_get_irqsave(&nklock, __rtdm_s);		\
+	xnsched_lock();					\
+	code_block;					\
+	xnsched_unlock();				\
+	xnlock_put_irqrestore(&nklock, __rtdm_s);	\
+}
+#endif
+
+/** @} Big dual kernel lock */
+
+/**
+ * @defgroup rtdm_sync_spinlock Spinlock with preemption deactivation
+ * @{
+ */
+
+/**
+ * Static lock initialisation
+ */
+#define RTDM_LOCK_UNLOCKED(__name)	PIPELINE_SPIN_LOCK_UNLOCKED(__name)
+
+#define DEFINE_RTDM_LOCK(__name)		\
+	rtdm_lock_t __name = RTDM_LOCK_UNLOCKED(__name)
+
+/** Lock variable */
+typedef pipeline_spinlock_t rtdm_lock_t;
+
+/** Variable to save the context while holding a lock */
+typedef unsigned long rtdm_lockctx_t;
+
+/**
+ * Dynamic lock initialisation
+ *
+ * @param lock Address of lock variable
+ *
+ * @coretags{task-unrestricted}
+ */
+static inline void rtdm_lock_init(rtdm_lock_t *lock)
+{
+	raw_spin_lock_init(lock);
+}
+
+/**
+ * Acquire lock from non-preemptible contexts
+ *
+ * @param lock Address of lock variable
+ *
+ * @coretags{unrestricted}
+ */
+static inline void rtdm_lock_get(rtdm_lock_t *lock)
+{
+	XENO_BUG_ON(COBALT, !spltest());
+	raw_spin_lock(lock);
+	xnsched_lock();
+}
+
+/**
+ * Release lock without preemption restoration
+ *
+ * @param lock Address of lock variable
+ *
+ * @coretags{unrestricted, might-switch}
+ */
+static inline void rtdm_lock_put(rtdm_lock_t *lock)
+{
+	raw_spin_unlock(lock);
+	xnsched_unlock();
+}
+
+/**
+ * Acquire lock and disable preemption by stalling the head domain.
+ *
+ * @param __lock Address of lock variable
+ * @param __context name of local variable to store the context in
+ *
+ * @coretags{unrestricted}
+ */
+#define rtdm_lock_get_irqsave(__lock, __context)	\
+	((__context) = __rtdm_lock_get_irqsave(__lock))
+
+static inline rtdm_lockctx_t __rtdm_lock_get_irqsave(rtdm_lock_t *lock)
+{
+	rtdm_lockctx_t context;
+
+	splhigh(context);
+	raw_spin_lock(lock);
+	xnsched_lock();
+
+	return context;
+}
+
+/**
+ * Release lock and restore preemption state
+ *
+ * @param lock Address of lock variable
+ * @param context name of local variable which stored the context
+ *
+ * @coretags{unrestricted}
+ */
+static inline
+void rtdm_lock_put_irqrestore(rtdm_lock_t *lock, rtdm_lockctx_t context)
+{
+	raw_spin_unlock(lock);
+	xnsched_unlock();
+	splexit(context);
+}
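+
+/*
+ * Usage sketch (illustrative only): protecting data shared with an
+ * interrupt handler. foo_lock and the protected data are hypothetical.
+ *
+ *	static DEFINE_RTDM_LOCK(foo_lock);
+ *
+ *	rtdm_lockctx_t ctx;
+ *
+ *	rtdm_lock_get_irqsave(&foo_lock, ctx);
+ *	// ... update data also touched from IRQ context ...
+ *	rtdm_lock_put_irqrestore(&foo_lock, ctx);
+ */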
+
+/**
+ * Disable preemption locally
+ *
+ * @param __context name of local variable to store the context in
+ *
+ * @coretags{unrestricted}
+ */
+#define rtdm_lock_irqsave(__context)	\
+	splhigh(__context)
+
+/**
+ * Restore preemption state
+ *
+ * @param __context name of local variable which stored the context
+ *
+ * @coretags{unrestricted}
+ */
+#define rtdm_lock_irqrestore(__context)	\
+	splexit(__context)
+
+/** @} Spinlock with preemption deactivation */
+
+#ifndef DOXYGEN_CPP
+
+struct rtdm_waitqueue {
+	struct xnsynch wait;
+};
+typedef struct rtdm_waitqueue rtdm_waitqueue_t;
+
+#define RTDM_WAITQUEUE_INITIALIZER(__name) {		 \
+	    .wait = XNSYNCH_WAITQUEUE_INITIALIZER((__name).wait), \
+	}
+
+#define DEFINE_RTDM_WAITQUEUE(__name)				\
+	struct rtdm_waitqueue __name = RTDM_WAITQUEUE_INITIALIZER(__name)
+
+#define DEFINE_RTDM_WAITQUEUE_ONSTACK(__name)	\
+	DEFINE_RTDM_WAITQUEUE(__name)
+
+static inline void rtdm_waitqueue_init(struct rtdm_waitqueue *wq)
+{
+	*wq = (struct rtdm_waitqueue)RTDM_WAITQUEUE_INITIALIZER(*wq);
+}
+
+static inline void rtdm_waitqueue_destroy(struct rtdm_waitqueue *wq)
+{
+	xnsynch_destroy(&wq->wait);
+}
+
+static inline int __rtdm_dowait(struct rtdm_waitqueue *wq,
+				nanosecs_rel_t timeout, xntmode_t timeout_mode)
+{
+	int ret;
+	
+	ret = xnsynch_sleep_on(&wq->wait, timeout, timeout_mode);
+	if (ret & XNBREAK)
+		return -EINTR;
+	if (ret & XNTIMEO)
+		return -ETIMEDOUT;
+	if (ret & XNRMID)
+		return -EIDRM;
+	return 0;
+}
+
+static inline int __rtdm_timedwait(struct rtdm_waitqueue *wq,
+				   nanosecs_rel_t timeout, rtdm_toseq_t *toseq)
+{
+	if (toseq && timeout > 0)
+		return __rtdm_dowait(wq, *toseq, XN_ABSOLUTE);
+
+	return __rtdm_dowait(wq, timeout, XN_RELATIVE);
+}
+
+#define rtdm_timedwait_condition_locked(__wq, __cond, __timeout, __toseq) \
+	({								\
+		int __ret = 0;						\
+		while (__ret == 0 && !(__cond))				\
+			__ret = __rtdm_timedwait(__wq, __timeout, __toseq); \
+		__ret;							\
+	})
+
+#define rtdm_wait_condition_locked(__wq, __cond)			\
+	({								\
+		int __ret = 0;						\
+		while (__ret == 0 && !(__cond))				\
+			__ret = __rtdm_dowait(__wq,			\
+					      XN_INFINITE, XN_RELATIVE); \
+		__ret;							\
+	})
+
+#define rtdm_timedwait_condition(__wq, __cond, __timeout, __toseq)	\
+	({								\
+		spl_t __s;						\
+		int __ret;						\
+		xnlock_get_irqsave(&nklock, __s);			\
+		__ret = rtdm_timedwait_condition_locked(__wq, __cond,	\
+					      __timeout, __toseq);	\
+		xnlock_put_irqrestore(&nklock, __s);			\
+		__ret;							\
+	})
+
+#define rtdm_timedwait(__wq, __timeout, __toseq)			\
+	__rtdm_timedwait(__wq, __timeout, __toseq)
+
+#define rtdm_timedwait_locked(__wq, __timeout, __toseq)			\
+	rtdm_timedwait(__wq, __timeout, __toseq)
+
+#define rtdm_wait_condition(__wq, __cond)				\
+	({								\
+		spl_t __s;						\
+		int __ret;						\
+		xnlock_get_irqsave(&nklock, __s);			\
+		__ret = rtdm_wait_condition_locked(__wq, __cond);	\
+		xnlock_put_irqrestore(&nklock, __s);			\
+		__ret;							\
+	})
+
+#define rtdm_wait(__wq)							\
+	__rtdm_dowait(__wq, XN_INFINITE, XN_RELATIVE)
+
+#define rtdm_wait_locked(__wq)  rtdm_wait(__wq)
+
+#define rtdm_waitqueue_lock(__wq, __context)  cobalt_atomic_enter(__context)
+
+#define rtdm_waitqueue_unlock(__wq, __context)  cobalt_atomic_leave(__context)
+
+#define rtdm_waitqueue_signal(__wq)					\
+	({								\
+		struct xnthread *__waiter;				\
+		__waiter = xnsynch_wakeup_one_sleeper(&(__wq)->wait);	\
+		xnsched_run();						\
+		__waiter != NULL;					\
+	})
+
+#define __rtdm_waitqueue_flush(__wq, __reason)				\
+	({								\
+		int __ret;						\
+		__ret = xnsynch_flush(&(__wq)->wait, __reason);		\
+		xnsched_run();						\
+		__ret == XNSYNCH_RESCHED;				\
+	})
+
+#define rtdm_waitqueue_broadcast(__wq)	\
+	__rtdm_waitqueue_flush(__wq, 0)
+
+#define rtdm_waitqueue_flush(__wq)	\
+	__rtdm_waitqueue_flush(__wq, XNBREAK)
+
+#define rtdm_waitqueue_wakeup(__wq, __waiter)				\
+	do {								\
+		xnsynch_wakeup_this_sleeper(&(__wq)->wait, __waiter);	\
+		xnsched_run();						\
+	} while (0)
+
+#define rtdm_for_each_waiter(__pos, __wq)		\
+	xnsynch_for_each_sleeper(__pos, &(__wq)->wait)
+
+#define rtdm_for_each_waiter_safe(__pos, __tmp, __wq)	\
+	xnsynch_for_each_sleeper_safe(__pos, __tmp, &(__wq)->wait)
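+
+/*
+ * Usage sketch (illustrative only): a simple producer/consumer pattern
+ * built on the waitqueue services above. data_ready and fetch_data()
+ * are hypothetical driver-side state and helpers.
+ *
+ *	static DEFINE_RTDM_WAITQUEUE(foo_wq);
+ *
+ *	// consumer side
+ *	spl_t s;
+ *	int ret;
+ *
+ *	rtdm_waitqueue_lock(&foo_wq, s);
+ *	ret = rtdm_wait_condition_locked(&foo_wq, data_ready);
+ *	if (ret == 0)
+ *		fetch_data();
+ *	rtdm_waitqueue_unlock(&foo_wq, s);
+ *
+ *	// producer side
+ *	rtdm_waitqueue_lock(&foo_wq, s);
+ *	data_ready = true;
+ *	rtdm_waitqueue_signal(&foo_wq);
+ *	rtdm_waitqueue_unlock(&foo_wq, s);
+ */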
+
+#endif /* !DOXYGEN_CPP */
+
+/** @} rtdm_sync */
+
+/* --- Interrupt management services --- */
+/*!
+ * @addtogroup rtdm_irq
+ * @{
+ */
+
+typedef struct xnintr rtdm_irq_t;
+
+/*!
+ * @anchor RTDM_IRQTYPE_xxx   @name RTDM_IRQTYPE_xxx
+ * Interrupt registrations flags
+ * @{
+ */
+/** Enable IRQ-sharing with other real-time drivers */
+#define RTDM_IRQTYPE_SHARED		XN_IRQTYPE_SHARED
+/** Mark IRQ as edge-triggered, relevant for correct handling of shared
+ *  edge-triggered IRQs */
+#define RTDM_IRQTYPE_EDGE		XN_IRQTYPE_EDGE
+/** @} RTDM_IRQTYPE_xxx */
+
+/**
+ * Interrupt handler
+ *
+ * @param[in] irq_handle IRQ handle as returned by rtdm_irq_request()
+ *
+ * @return 0 or a combination of @ref RTDM_IRQ_xxx flags
+ */
+typedef int (*rtdm_irq_handler_t)(rtdm_irq_t *irq_handle);
+
+/*!
+ * @anchor RTDM_IRQ_xxx   @name RTDM_IRQ_xxx
+ * Return flags of interrupt handlers
+ * @{
+ */
+/** Unhandled interrupt */
+#define RTDM_IRQ_NONE			XN_IRQ_NONE
+/** Denote handled interrupt */
+#define RTDM_IRQ_HANDLED		XN_IRQ_HANDLED
+/** Request interrupt disabling on exit */
+#define RTDM_IRQ_DISABLE		XN_IRQ_DISABLE
+/** @} RTDM_IRQ_xxx */
+
+/**
+ * Retrieve IRQ handler argument
+ *
+ * @param irq_handle IRQ handle
+ * @param type Type of the pointer to return
+ *
+ * @return The argument pointer registered on rtdm_irq_request() is returned,
+ * type-casted to the specified @a type.
+ *
+ * @coretags{unrestricted}
+ */
+#define rtdm_irq_get_arg(irq_handle, type)	((type *)irq_handle->cookie)
+/** @} rtdm_irq */
+
+int rtdm_irq_request(rtdm_irq_t *irq_handle, unsigned int irq_no,
+		     rtdm_irq_handler_t handler, unsigned long flags,
+		     const char *device_name, void *arg);
+
+int rtdm_irq_request_affine(rtdm_irq_t *irq_handle, unsigned int irq_no,
+			    rtdm_irq_handler_t handler, unsigned long flags,
+			    const char *device_name, void *arg,
+			    const cpumask_t *cpumask);
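+
+/*
+ * Usage sketch (illustrative only): registering an interrupt handler and
+ * retrieving its argument. foo_isr(), struct foo_device and foo_dev are
+ * hypothetical.
+ *
+ *	static int foo_isr(rtdm_irq_t *irq_handle)
+ *	{
+ *		struct foo_device *dev =
+ *			rtdm_irq_get_arg(irq_handle, struct foo_device);
+ *
+ *		// acknowledge the device, wake up waiters, etc.
+ *		return RTDM_IRQ_HANDLED;
+ *	}
+ *
+ *	ret = rtdm_irq_request(&foo_dev->irq_handle, irq_no, foo_isr,
+ *			       RTDM_IRQTYPE_SHARED, "foo", foo_dev);
+ */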
+
+#ifndef DOXYGEN_CPP /* Avoid static inline tags for RTDM in doxygen */
+static inline int rtdm_irq_free(rtdm_irq_t *irq_handle)
+{
+	if (!XENO_ASSERT(COBALT, xnsched_root_p()))
+		return -EPERM;
+	xnintr_destroy(irq_handle);
+	return 0;
+}
+
+static inline int rtdm_irq_enable(rtdm_irq_t *irq_handle)
+{
+	xnintr_enable(irq_handle);
+	return 0;
+}
+
+static inline int rtdm_irq_disable(rtdm_irq_t *irq_handle)
+{
+	xnintr_disable(irq_handle);
+	return 0;
+}
+
+static inline int rtdm_irq_set_affinity(rtdm_irq_t *irq_handle,
+					const cpumask_t *cpumask)
+{
+	return xnintr_affinity(irq_handle, cpumask);
+}
+#endif /* !DOXYGEN_CPP */
+
+/* --- non-real-time signalling services --- */
+
+/*!
+ * @addtogroup rtdm_nrtsignal
+ * @{
+ */
+
+typedef struct rtdm_nrtsig rtdm_nrtsig_t;
+/**
+ * Non-real-time signal handler
+ *
+ * @param[in] nrt_sig Signal handle pointer as passed to rtdm_nrtsig_init()
+ * @param[in] arg Argument as passed to rtdm_nrtsig_init()
+ *
+ * @note The signal handler will run in soft-IRQ context of the non-real-time
+ * subsystem. Mind the implications of this context, e.g. blocking
+ * operations must not be invoked.
+ */
+typedef void (*rtdm_nrtsig_handler_t)(rtdm_nrtsig_t *nrt_sig, void *arg);
+
+struct rtdm_nrtsig {
+	struct pipeline_inband_work inband_work; /* Must be first */
+	rtdm_nrtsig_handler_t handler;
+	void *arg;
+};
+
+void rtdm_schedule_nrt_work(struct work_struct *lostage_work);
+/** @} rtdm_nrtsignal */
+
+#ifndef DOXYGEN_CPP /* Avoid static inline tags for RTDM in doxygen */
+void __rtdm_nrtsig_execute(struct pipeline_inband_work *inband_work);
+
+static inline void rtdm_nrtsig_init(rtdm_nrtsig_t *nrt_sig,
+				    rtdm_nrtsig_handler_t handler, void *arg)
+{
+	nrt_sig->inband_work = (struct pipeline_inband_work)
+		PIPELINE_INBAND_WORK_INITIALIZER(*nrt_sig,
+						 __rtdm_nrtsig_execute);
+	nrt_sig->handler = handler;
+	nrt_sig->arg = arg;
+}
+
+static inline void rtdm_nrtsig_destroy(rtdm_nrtsig_t *nrt_sig)
+{
+	nrt_sig->handler = NULL;
+	nrt_sig->arg = NULL;
+}
+
+void rtdm_nrtsig_pend(rtdm_nrtsig_t *nrt_sig);
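+
+/*
+ * Usage sketch (illustrative only): deferring non-real-time work from a
+ * real-time context. foo_nrt_handler() and foo_dev are hypothetical.
+ *
+ *	static void foo_nrt_handler(rtdm_nrtsig_t *nrt_sig, void *arg)
+ *	{
+ *		// runs in inband soft-IRQ context, must not block
+ *	}
+ *
+ *	rtdm_nrtsig_init(&foo_dev->nrt_sig, foo_nrt_handler, foo_dev);
+ *
+ *	// later, from real-time context:
+ *	rtdm_nrtsig_pend(&foo_dev->nrt_sig);
+ */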
+#endif /* !DOXYGEN_CPP */
+
+/* --- timer services --- */
+
+/*!
+ * @addtogroup rtdm_timer
+ * @{
+ */
+
+typedef struct xntimer rtdm_timer_t;
+
+/**
+ * Timer handler
+ *
+ * @param[in] timer Timer handle as returned by rtdm_timer_init()
+ */
+typedef void (*rtdm_timer_handler_t)(rtdm_timer_t *timer);
+
+/*!
+ * @anchor RTDM_TIMERMODE_xxx   @name RTDM_TIMERMODE_xxx
+ * Timer operation modes
+ * @{
+ */
+enum rtdm_timer_mode {
+	/** Monotonic timer with relative timeout */
+	RTDM_TIMERMODE_RELATIVE = XN_RELATIVE,
+
+	/** Monotonic timer with absolute timeout */
+	RTDM_TIMERMODE_ABSOLUTE = XN_ABSOLUTE,
+
+	/** Adjustable timer with absolute timeout */
+	RTDM_TIMERMODE_REALTIME = XN_REALTIME
+};
+/** @} RTDM_TIMERMODE_xxx */
+
+/** @} rtdm_timer */
+
+int rtdm_timer_init(rtdm_timer_t *timer, rtdm_timer_handler_t handler,
+		    const char *name);
+
+void rtdm_timer_destroy(rtdm_timer_t *timer);
+
+int rtdm_timer_start(rtdm_timer_t *timer, nanosecs_abs_t expiry,
+		     nanosecs_rel_t interval, enum rtdm_timer_mode mode);
+
+void rtdm_timer_stop(rtdm_timer_t *timer);
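+
+/*
+ * Usage sketch (illustrative only): a periodic timer ticking every
+ * millisecond, relative to the current time. foo_timer_handler() and
+ * foo_timer are hypothetical.
+ *
+ *	static void foo_timer_handler(rtdm_timer_t *timer)
+ *	{
+ *		// runs in atomic context
+ *	}
+ *
+ *	ret = rtdm_timer_init(&foo_timer, foo_timer_handler, "foo timer");
+ *	if (ret == 0)
+ *		ret = rtdm_timer_start(&foo_timer, 1000000, 1000000,
+ *				       RTDM_TIMERMODE_RELATIVE);
+ */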
+
+#ifndef DOXYGEN_CPP /* Avoid static inline tags for RTDM in doxygen */
+static inline int rtdm_timer_start_in_handler(rtdm_timer_t *timer,
+					      nanosecs_abs_t expiry,
+					      nanosecs_rel_t interval,
+					      enum rtdm_timer_mode mode)
+{
+	return xntimer_start(timer, expiry, interval, (xntmode_t)mode);
+}
+
+static inline void rtdm_timer_stop_in_handler(rtdm_timer_t *timer)
+{
+	xntimer_stop(timer);
+}
+#endif /* !DOXYGEN_CPP */
+
+/* --- task services --- */
+/*!
+ * @addtogroup rtdm_task
+ * @{
+ */
+
+typedef struct xnthread rtdm_task_t;
+
+/**
+ * Real-time task procedure
+ *
+ * @param[in,out] arg argument as passed to rtdm_task_init()
+ */
+typedef void (*rtdm_task_proc_t)(void *arg);
+
+/**
+ * @anchor rtdmtaskprio @name Task Priority Range
+ * Maximum and minimum task priorities
+ * @{ */
+#define RTDM_TASK_LOWEST_PRIORITY	0
+#define RTDM_TASK_HIGHEST_PRIORITY	99
+/** @} Task Priority Range */
+
+/**
+ * @anchor rtdmchangetaskprio @name Task Priority Modification
+ * Raise or lower task priorities by one level
+ * @{ */
+#define RTDM_TASK_RAISE_PRIORITY	(+1)
+#define RTDM_TASK_LOWER_PRIORITY	(-1)
+/** @} Task Priority Modification */
+
+/** @} rtdm_task */
+
+int rtdm_task_init(rtdm_task_t *task, const char *name,
+		   rtdm_task_proc_t task_proc, void *arg,
+		   int priority, nanosecs_rel_t period);
+int __rtdm_task_sleep(xnticks_t timeout, xntmode_t mode);
+void rtdm_task_busy_sleep(nanosecs_rel_t delay);
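+
+/*
+ * Usage sketch (illustrative only): spawning a periodic worker task.
+ * foo_worker(), foo_task, foo_dev and the 1 ms period are assumptions
+ * made for the example.
+ *
+ *	static void foo_worker(void *arg)
+ *	{
+ *		unsigned long overruns;
+ *
+ *		while (!rtdm_task_should_stop()) {
+ *			if (rtdm_task_wait_period(&overruns))
+ *				break;
+ *			// periodic work goes here
+ *		}
+ *	}
+ *
+ *	ret = rtdm_task_init(&foo_task, "foo worker", foo_worker, foo_dev,
+ *			     RTDM_TASK_HIGHEST_PRIORITY, 1000000);
+ */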
+
+#ifndef DOXYGEN_CPP /* Avoid static inline tags for RTDM in doxygen */
+static inline void rtdm_task_destroy(rtdm_task_t *task)
+{
+	xnthread_cancel(task);
+	xnthread_join(task, true);
+}
+
+static inline int rtdm_task_should_stop(void)
+{
+	return xnthread_test_info(xnthread_current(), XNCANCELD);
+}
+
+void rtdm_task_join(rtdm_task_t *task);
+
+static inline void __deprecated rtdm_task_join_nrt(rtdm_task_t *task,
+						   unsigned int poll_delay)
+{
+	rtdm_task_join(task);
+}
+
+static inline void rtdm_task_set_priority(rtdm_task_t *task, int priority)
+{
+	union xnsched_policy_param param = { .rt = { .prio = priority } };
+	spl_t s;
+
+	splhigh(s);
+	xnthread_set_schedparam(task, &xnsched_class_rt, &param);
+	xnsched_run();
+	splexit(s);
+}
+
+static inline int rtdm_task_set_period(rtdm_task_t *task,
+				       nanosecs_abs_t start_date,
+				       nanosecs_rel_t period)
+{
+	if (period < 0)
+		period = 0;
+	if (start_date == 0)
+		start_date = XN_INFINITE;
+
+	return xnthread_set_periodic(task, start_date, XN_ABSOLUTE, period);
+}
+
+static inline int rtdm_task_unblock(rtdm_task_t *task)
+{
+	spl_t s;
+	int res;
+
+	splhigh(s);
+	res = xnthread_unblock(task);
+	xnsched_run();
+	splexit(s);
+
+	return res;
+}
+
+static inline rtdm_task_t *rtdm_task_current(void)
+{
+	return xnthread_current();
+}
+
+static inline int rtdm_task_wait_period(unsigned long *overruns_r)
+{
+	if (!XENO_ASSERT(COBALT, !xnsched_unblockable_p()))
+		return -EPERM;
+	return xnthread_wait_period(overruns_r);
+}
+
+static inline int rtdm_task_sleep(nanosecs_rel_t delay)
+{
+	return __rtdm_task_sleep(delay, XN_RELATIVE);
+}
+
+static inline int
+rtdm_task_sleep_abs(nanosecs_abs_t wakeup_date, enum rtdm_timer_mode mode)
+{
+	/* For the sake of a consistent API usage... */
+	if (mode != RTDM_TIMERMODE_ABSOLUTE && mode != RTDM_TIMERMODE_REALTIME)
+		return -EINVAL;
+	return __rtdm_task_sleep(wakeup_date, (xntmode_t)mode);
+}
+
+/* rtdm_task_sleep_abs shall be used instead */
+static inline int __deprecated rtdm_task_sleep_until(nanosecs_abs_t wakeup_time)
+{
+	return __rtdm_task_sleep(wakeup_time, XN_REALTIME);
+}
+
+#define rtdm_task_busy_wait(__condition, __spin_ns, __sleep_ns)			\
+	({									\
+		__label__ done;							\
+		nanosecs_abs_t __end;						\
+		int __ret = 0;							\
+		for (;;) {							\
+			__end = rtdm_clock_read_monotonic() + __spin_ns;	\
+			for (;;) {						\
+				if (__condition)				\
+					goto done;				\
+				if (rtdm_clock_read_monotonic() >= __end)	\
+					break;					\
+			}							\
+			__ret = rtdm_task_sleep(__sleep_ns);			\
+			if (__ret)						\
+				break;						\
+		}								\
+	done:									\
+		__ret;								\
+	})
+
+#define rtdm_wait_context	xnthread_wait_context
+
+static inline
+void rtdm_wait_complete(struct rtdm_wait_context *wc)
+{
+	xnthread_complete_wait(wc);
+}
+
+static inline
+int rtdm_wait_is_completed(struct rtdm_wait_context *wc)
+{
+	return xnthread_wait_complete_p(wc);
+}
+
+static inline void rtdm_wait_prepare(struct rtdm_wait_context *wc)
+{
+	xnthread_prepare_wait(wc);
+}
+
+static inline
+struct rtdm_wait_context *rtdm_wait_get_context(rtdm_task_t *task)
+{
+	return xnthread_get_wait_context(task);
+}
+
+#endif /* !DOXYGEN_CPP */
+
+/* --- event services --- */
+
+typedef struct rtdm_event {
+	struct xnsynch synch_base;
+	DECLARE_XNSELECT(select_block);
+} rtdm_event_t;
+
+#define RTDM_EVENT_PENDING		XNSYNCH_SPARE1
+
+void rtdm_event_init(rtdm_event_t *event, unsigned long pending);
+int rtdm_event_select(rtdm_event_t *event, rtdm_selector_t *selector,
+		      enum rtdm_selecttype type, unsigned fd_index);
+int rtdm_event_wait(rtdm_event_t *event);
+int rtdm_event_timedwait(rtdm_event_t *event, nanosecs_rel_t timeout,
+			 rtdm_toseq_t *timeout_seq);
+void rtdm_event_signal(rtdm_event_t *event);
+
+void rtdm_event_clear(rtdm_event_t *event);
+
+void rtdm_event_pulse(rtdm_event_t *event);
+
+void rtdm_event_destroy(rtdm_event_t *event);
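+
+/*
+ * Usage sketch (illustrative only): signalling an event from an IRQ
+ * handler and waiting for it from a read handler, with a timeout
+ * sequence spanning several waits. foo_dev and the timeout value are
+ * hypothetical.
+ *
+ *	rtdm_event_init(&foo_dev->event, 0);
+ *
+ *	// in the interrupt handler:
+ *	rtdm_event_signal(&foo_dev->event);
+ *
+ *	// in the read handler:
+ *	rtdm_toseq_t toseq;
+ *
+ *	rtdm_toseq_init(&toseq, timeout);
+ *	ret = rtdm_event_timedwait(&foo_dev->event, timeout, &toseq);
+ */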
+
+/* --- semaphore services --- */
+
+typedef struct rtdm_sem {
+	unsigned long value;
+	struct xnsynch synch_base;
+	DECLARE_XNSELECT(select_block);
+} rtdm_sem_t;
+
+void rtdm_sem_init(rtdm_sem_t *sem, unsigned long value);
+int rtdm_sem_select(rtdm_sem_t *sem, rtdm_selector_t *selector,
+		    enum rtdm_selecttype type, unsigned fd_index);
+int rtdm_sem_down(rtdm_sem_t *sem);
+int rtdm_sem_timeddown(rtdm_sem_t *sem, nanosecs_rel_t timeout,
+		       rtdm_toseq_t *timeout_seq);
+void rtdm_sem_up(rtdm_sem_t *sem);
+
+void rtdm_sem_destroy(rtdm_sem_t *sem);
+
+/* --- mutex services --- */
+
+typedef struct rtdm_mutex {
+	struct xnsynch synch_base;
+	atomic_t fastlock;
+} rtdm_mutex_t;
+
+void rtdm_mutex_init(rtdm_mutex_t *mutex);
+int rtdm_mutex_lock(rtdm_mutex_t *mutex);
+int rtdm_mutex_timedlock(rtdm_mutex_t *mutex, nanosecs_rel_t timeout,
+			 rtdm_toseq_t *timeout_seq);
+void rtdm_mutex_unlock(rtdm_mutex_t *mutex);
+void rtdm_mutex_destroy(rtdm_mutex_t *mutex);
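+
+/*
+ * Usage sketch (illustrative only): serializing access to device state
+ * between RTDM handlers. foo_dev is hypothetical.
+ *
+ *	rtdm_mutex_init(&foo_dev->lock);
+ *
+ *	ret = rtdm_mutex_lock(&foo_dev->lock);
+ *	if (ret == 0) {
+ *		// update device state
+ *		rtdm_mutex_unlock(&foo_dev->lock);
+ *	}
+ */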
+
+/* --- utility functions --- */
+
+#define rtdm_printk(format, ...)	printk(format, ##__VA_ARGS__)
+
+#define rtdm_printk_ratelimited(fmt, ...)  do {				\
+	if (xnclock_ratelimit())					\
+		printk(fmt, ##__VA_ARGS__);				\
+} while (0)
+
+#ifndef DOXYGEN_CPP /* Avoid static inline tags for RTDM in doxygen */
+static inline void *rtdm_malloc(size_t size)
+{
+	return xnmalloc(size);
+}
+
+static inline void rtdm_free(void *ptr)
+{
+	xnfree(ptr);
+}
+
+int rtdm_mmap_to_user(struct rtdm_fd *fd,
+		      void *src_addr, size_t len,
+		      int prot, void **pptr,
+		      struct vm_operations_struct *vm_ops,
+		      void *vm_private_data);
+
+int rtdm_iomap_to_user(struct rtdm_fd *fd,
+		       phys_addr_t src_addr, size_t len,
+		       int prot, void **pptr,
+		       struct vm_operations_struct *vm_ops,
+		       void *vm_private_data);
+
+int rtdm_mmap_kmem(struct vm_area_struct *vma, void *va);
+
+int rtdm_mmap_vmem(struct vm_area_struct *vma, void *va);
+
+int rtdm_mmap_iomem(struct vm_area_struct *vma, phys_addr_t pa);
+
+int rtdm_munmap(void *ptr, size_t len);
+
+static inline int rtdm_read_user_ok(struct rtdm_fd *fd,
+				    const void __user *ptr, size_t size)
+{
+	return access_rok(ptr, size);
+}
+
+static inline int rtdm_rw_user_ok(struct rtdm_fd *fd,
+				  const void __user *ptr, size_t size)
+{
+	return access_wok(ptr, size);
+}
+
+static inline int rtdm_copy_from_user(struct rtdm_fd *fd,
+				      void *dst, const void __user *src,
+				      size_t size)
+{
+	return __xn_copy_from_user(dst, src, size) ? -EFAULT : 0;
+}
+
+static inline int rtdm_safe_copy_from_user(struct rtdm_fd *fd,
+					   void *dst, const void __user *src,
+					   size_t size)
+{
+	return cobalt_copy_from_user(dst, src, size);
+}
+
+static inline int rtdm_copy_to_user(struct rtdm_fd *fd,
+				    void __user *dst, const void *src,
+				    size_t size)
+{
+	return __xn_copy_to_user(dst, src, size) ? -EFAULT : 0;
+}
+
+static inline int rtdm_safe_copy_to_user(struct rtdm_fd *fd,
+					 void __user *dst, const void *src,
+					 size_t size)
+{
+	return cobalt_copy_to_user(dst, src, size);
+}
+
+static inline int rtdm_strncpy_from_user(struct rtdm_fd *fd,
+					 char *dst,
+					 const char __user *src, size_t count)
+{
+	return cobalt_strncpy_from_user(dst, src, count);
+}
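+
+/*
+ * Usage sketch (illustrative only): fetching an ioctl argument, honoring
+ * both user and kernel originated requests. struct foo_config and arg
+ * are hypothetical.
+ *
+ *	struct foo_config cfg;
+ *
+ *	if (rtdm_fd_is_user(fd)) {
+ *		if (rtdm_safe_copy_from_user(fd, &cfg, arg, sizeof(cfg)))
+ *			return -EFAULT;
+ *	} else {
+ *		memcpy(&cfg, arg, sizeof(cfg));
+ *	}
+ */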
+
+static inline bool rtdm_available(void)
+{
+	return realtime_core_enabled();
+}
+
+static inline int rtdm_rt_capable(struct rtdm_fd *fd)
+{
+	if (!XENO_ASSERT(COBALT, !xnsched_interrupt_p()))
+		return 0;
+
+	if (!rtdm_fd_is_user(fd))
+		return !xnsched_root_p();
+
+	return xnthread_current() != NULL;
+}
+
+static inline int rtdm_in_rt_context(void)
+{
+	return is_primary_domain();
+}
+
+#define RTDM_IOV_FASTMAX  16
+
+int rtdm_get_iovec(struct rtdm_fd *fd, struct iovec **iov,
+		   const struct user_msghdr *msg,
+		   struct iovec *iov_fast);
+
+int rtdm_put_iovec(struct rtdm_fd *fd, struct iovec *iov,
+		   const struct user_msghdr *msg,
+		   struct iovec *iov_fast);
+
+static inline
+void rtdm_drop_iovec(struct iovec *iov, struct iovec *iov_fast)
+{
+	if (iov != iov_fast)
+		xnfree(iov);
+}
+
+ssize_t rtdm_get_iov_flatlen(struct iovec *iov, int iovlen);
+
+#endif /* !DOXYGEN_CPP */
+
+#endif /* _COBALT_RTDM_DRIVER_H */
+++ linux-patched/include/xenomai/rtdm/rtdm.h	2022-03-21 12:58:31.872864749 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/include/xenomai/rtdm/gpiopwm.h	1970-01-01 01:00:00.000000000 +0100
+/*
+ * Copyright (C) 2005, 2006 Jan Kiszka <jan.kiszka@web.de>
+ * Copyright (C) 2005 Joerg Langenberg <joerg.langenberg@gmx.net>
+ *
+ * Xenomai is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * Xenomai is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Xenomai; if not, write to the Free Software Foundation,
+ * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+#ifndef _COBALT_RTDM_RTDM_H
+#define _COBALT_RTDM_RTDM_H
+
+#include <linux/types.h>
+#include <linux/fcntl.h>
+#include <linux/ioctl.h>
+#include <linux/sched.h>
+#include <linux/socket.h>
+#include <cobalt/kernel/ppd.h>
+#include <rtdm/fd.h>
+
+typedef __u32 socklen_t;
+
+#include <rtdm/uapi/rtdm.h>
+
+int __rtdm_dev_open(const char *path, int oflag);
+
+int __rtdm_dev_socket(int protocol_family,
+		      int socket_type, int protocol);
+
+static inline int rtdm_open(const char *path, int oflag, ...)
+{
+	return __rtdm_dev_open(path, oflag);
+}
+
+static inline int rtdm_socket(int protocol_family,
+			      int socket_type, int protocol)
+{
+	return __rtdm_dev_socket(protocol_family, socket_type, protocol);
+}
+
+static inline int rtdm_close(int fd)
+{
+	return rtdm_fd_close(fd, RTDM_FD_MAGIC);
+}
+
+#define rtdm_fcntl(__fd, __cmd, __args...)	\
+	rtdm_fd_fcntl(__fd, __cmd, ##__args)
+
+#define rtdm_ioctl(__fd, __request, __args...)	\
+	rtdm_fd_ioctl(__fd, __request, ##__args)
+
+static inline ssize_t rtdm_read(int fd, void *buf, size_t count)
+{
+	return rtdm_fd_read(fd, buf, count);
+}
+
+static inline ssize_t rtdm_write(int fd, const void *buf, size_t count)
+{
+	return rtdm_fd_write(fd, buf, count);
+}
+
+static inline ssize_t rtdm_recvmsg(int s, struct user_msghdr *msg, int flags)
+{
+	return rtdm_fd_recvmsg(s, msg, flags);
+}
+
+static inline ssize_t rtdm_sendmsg(int s, const struct user_msghdr *msg, int flags)
+{
+	return rtdm_fd_sendmsg(s, msg, flags);
+}
+
+static inline
+ssize_t rtdm_recvfrom(int s, void *buf, size_t len, int flags,
+		      struct sockaddr *from,
+		      socklen_t *fromlen)
+{
+	struct user_msghdr msg;
+	struct iovec iov;
+	ssize_t ret;
+
+	iov.iov_base = buf;
+	iov.iov_len = len;
+	msg.msg_name = from;
+	msg.msg_namelen = from ? *fromlen : 0;
+	msg.msg_iov = &iov;
+	msg.msg_iovlen = 1;
+	msg.msg_control = NULL;
+	msg.msg_controllen = 0;
+
+	ret = rtdm_recvmsg(s, &msg, flags);
+	if (ret < 0)
+		return ret;
+
+	if (from)
+		*fromlen = msg.msg_namelen;
+
+	return ret;
+}
+
+static inline ssize_t rtdm_recv(int s, void *buf, size_t len, int flags)
+{
+	return rtdm_recvfrom(s, buf, len, flags, NULL, NULL);
+}
+
+static inline ssize_t rtdm_sendto(int s, const void *buf, size_t len,
+				  int flags, const struct sockaddr *to,
+				  socklen_t tolen)
+{
+	struct user_msghdr msg;
+	struct iovec iov;
+
+	iov.iov_base = (void *)buf;
+	iov.iov_len = len;
+	msg.msg_name = (struct sockaddr *)to;
+	msg.msg_namelen = tolen;
+	msg.msg_iov = &iov;
+	msg.msg_iovlen = 1;
+	msg.msg_control = NULL;
+	msg.msg_controllen = 0;
+
+	return rtdm_sendmsg(s, &msg, flags);
+}
+
+static inline ssize_t rtdm_send(int s, const void *buf, size_t len, int flags)
+{
+	return rtdm_sendto(s, buf, len, flags, NULL, 0);
+}
+
+static inline int rtdm_getsockopt(int s, int level, int optname,
+				  void *optval, socklen_t *optlen)
+{
+	struct _rtdm_getsockopt_args args = {
+		level, optname, optval, optlen
+	};
+
+	return rtdm_ioctl(s, _RTIOC_GETSOCKOPT, &args);
+}
+
+static inline int rtdm_setsockopt(int s, int level, int optname,
+				  const void *optval, socklen_t optlen)
+{
+	struct _rtdm_setsockopt_args args = {
+		level, optname, (void *)optval, optlen
+	};
+
+	return rtdm_ioctl(s, _RTIOC_SETSOCKOPT, &args);
+}
+
+static inline int rtdm_bind(int s, const struct sockaddr *my_addr,
+			    socklen_t addrlen)
+{
+	struct _rtdm_setsockaddr_args args = {
+		my_addr, addrlen
+	};
+
+	return rtdm_ioctl(s, _RTIOC_BIND, &args);
+}
+
+static inline int rtdm_connect(int s, const struct sockaddr *serv_addr,
+			       socklen_t addrlen)
+{
+	struct _rtdm_setsockaddr_args args = {
+		serv_addr, addrlen
+	};
+
+	return rtdm_ioctl(s, _RTIOC_CONNECT, &args);
+}
+
+static inline int rtdm_listen(int s, int backlog)
+{
+	return rtdm_ioctl(s, _RTIOC_LISTEN, backlog);
+}
+
+static inline int rtdm_accept(int s, struct sockaddr *addr,
+			      socklen_t *addrlen)
+{
+	struct _rtdm_getsockaddr_args args = {
+		addr, addrlen
+	};
+
+	return rtdm_ioctl(s, _RTIOC_ACCEPT, &args);
+}
+
+static inline int rtdm_getsockname(int s, struct sockaddr *name,
+				   socklen_t *namelen)
+{
+	struct _rtdm_getsockaddr_args args = {
+		name, namelen
+	};
+
+	return rtdm_ioctl(s, _RTIOC_GETSOCKNAME, &args);
+}
+
+static inline int rtdm_getpeername(int s, struct sockaddr *name,
+				   socklen_t *namelen)
+{
+	struct _rtdm_getsockaddr_args args = {
+		name, namelen
+	};
+
+	return rtdm_ioctl(s, _RTIOC_GETPEERNAME, &args);
+}
+
+static inline int rtdm_shutdown(int s, int how)
+{
+	return rtdm_ioctl(s, _RTIOC_SHUTDOWN, how);
+}
+
+#endif /* _COBALT_RTDM_RTDM_H */
+++ linux-patched/include/xenomai/rtdm/gpiopwm.h	2022-03-21 12:58:31.864864827 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/include/xenomai/rtdm/fd.h	1970-01-01 01:00:00.000000000 +0100
+/*
+ * Copyright (C) 2015 Jorge Ramirez <jro@xenomai.org>
+ *
+ * Xenomai is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * Xenomai is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Xenomai; if not, write to the Free Software Foundation,
+ * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+#ifndef _COBALT_RTDM_PWM_H
+#define _COBALT_RTDM_PWM_H
+
+#include <rtdm/rtdm.h>
+#include <rtdm/uapi/gpiopwm.h>
+
+#endif /* !_COBALT_RTDM_PWM_H */
+++ linux-patched/include/xenomai/rtdm/fd.h	2022-03-21 12:58:31.857864895 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/include/xenomai/rtdm/cobalt.h	1970-01-01 01:00:00.000000000 +0100
+/*
+ * Copyright (C) 2005-2007 Jan Kiszka <jan.kiszka@web.de>
+ * Copyright (C) 2005 Joerg Langenberg <joerg.langenberg@gmx.net>
+ * Copyright (C) 2008,2013,2014 Gilles Chanteperdrix <gch@xenomai.org>.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+#ifndef _COBALT_KERNEL_FD_H
+#define _COBALT_KERNEL_FD_H
+
+#include <linux/types.h>
+#include <linux/socket.h>
+#include <linux/file.h>
+#include <cobalt/kernel/tree.h>
+#include <asm-generic/xenomai/syscall.h>
+
+struct vm_area_struct;
+struct rtdm_fd;
+struct _rtdm_mmap_request;
+struct xnselector;
+struct cobalt_ppd;
+struct rtdm_device;
+
+/**
+ * @file
+ * @anchor File operation handlers
+ * @addtogroup rtdm_device_register
+ * @{
+ */
+
+/**
+ * Open handler for named devices
+ *
+ * @param[in] fd File descriptor associated with opened device instance
+ * @param[in] oflags Open flags as passed by the user
+ *
+ * The file descriptor carries the device minor number, which can be
+ * retrieved by a call to rtdm_fd_minor(fd). The minor number can be
+ * used to distinguish devices managed by the same driver.
+ *
+ * @return 0 on success. On failure, a negative error code is returned.
+ *
+ * @see @c open() in IEEE Std 1003.1,
+ * http://www.opengroup.org/onlinepubs/009695399
+ */
+int rtdm_open_handler(struct rtdm_fd *fd, int oflags);
+
+/**
+ * Socket creation handler for protocol devices
+ *
+ * @param[in] fd File descriptor associated with opened device instance
+ * @param[in] protocol Protocol number as passed by the user
+ *
+ * @return 0 on success. On failure, a negative error code is returned.
+ *
+ * @see @c socket() in IEEE Std 1003.1,
+ * http://www.opengroup.org/onlinepubs/009695399
+ */
+int rtdm_socket_handler(struct rtdm_fd *fd, int protocol);
+
+/**
+ * Close handler
+ *
+ * @param[in] fd File descriptor associated with opened
+ * device instance.
+ *
+ * @see @c close() in IEEE Std 1003.1,
+ * http://www.opengroup.org/onlinepubs/009695399
+ */
+void rtdm_close_handler(struct rtdm_fd *fd);
+
+/**
+ * IOCTL handler
+ *
+ * @param[in] fd File descriptor
+ * @param[in] request Request number as passed by the user
+ * @param[in,out] arg Request argument as passed by the user
+ *
+ * @return A positive value or 0 on success. On failure return either
+ * -ENOSYS, to request that the function be called again from the opposite
+ * realtime/non-realtime context, or another negative error code.
+ *
+ * @see @c ioctl() in IEEE Std 1003.1,
+ * http://www.opengroup.org/onlinepubs/009695399
+ */
+int rtdm_ioctl_handler(struct rtdm_fd *fd, unsigned int request, void __user *arg);
+
+/**
+ * Read handler
+ *
+ * @param[in] fd File descriptor
+ * @param[out] buf Input buffer as passed by the user
+ * @param[in] size Number of bytes the user requests to read
+ *
+ * @return On success, the number of bytes read. On failure return either
+ * -ENOSYS, to request that this handler be called again from the opposite
+ * realtime/non-realtime context, or another negative error code.
+ *
+ * @see @c read() in IEEE Std 1003.1,
+ * http://www.opengroup.org/onlinepubs/009695399
+ */
+ssize_t rtdm_read_handler(struct rtdm_fd *fd, void __user *buf, size_t size);
+
+/**
+ * Write handler
+ *
+ * @param[in] fd File descriptor
+ * @param[in] buf Output buffer as passed by the user
+ * @param[in] size Number of bytes the user requests to write
+ *
+ * @return On success, the number of bytes written. On failure return
+ * either -ENOSYS, to request that this handler be called again from the
+ * opposite realtime/non-realtime context, or another negative error code.
+ *
+ * @see @c write() in IEEE Std 1003.1,
+ * http://www.opengroup.org/onlinepubs/009695399
+ */
+ssize_t rtdm_write_handler(struct rtdm_fd *fd, const void __user *buf, size_t size);
+
+/**
+ * Receive message handler
+ *
+ * @param[in] fd File descriptor
+ * @param[in,out] msg Message descriptor as passed by the user, automatically
+ * mirrored to safe kernel memory in case of user mode call
+ * @param[in] flags Message flags as passed by the user
+ *
+ * @return On success, the number of bytes received. On failure return
+ * either -ENOSYS, to request that this handler be called again from the
+ * opposite realtime/non-realtime context, or another negative error code.
+ *
+ * @see @c recvmsg() in IEEE Std 1003.1,
+ * http://www.opengroup.org/onlinepubs/009695399
+ */
+ssize_t rtdm_recvmsg_handler(struct rtdm_fd *fd, struct user_msghdr *msg, int flags);
+
+/**
+ * Transmit message handler
+ *
+ * @param[in] fd File descriptor
+ * @param[in] msg Message descriptor as passed by the user, automatically
+ * mirrored to safe kernel memory in case of user mode call
+ * @param[in] flags Message flags as passed by the user
+ *
+ * @return On success, the number of bytes transmitted. On failure return
+ * either -ENOSYS, to request that this handler be called again from the
+ * opposite realtime/non-realtime context, or another negative error code.
+ *
+ * @see @c sendmsg() in IEEE Std 1003.1,
+ * http://www.opengroup.org/onlinepubs/009695399
+ */
+ssize_t rtdm_sendmsg_handler(struct rtdm_fd *fd, const struct user_msghdr *msg, int flags);
+
+/**
+ * Select handler
+ *
+ * @param[in] fd File descriptor
+ * @param selector Pointer to the selector structure
+ * @param type Type of events (@a XNSELECT_READ, @a XNSELECT_WRITE, or @a
+ * XNSELECT_EXCEPT)
+ * @param index Index of the file descriptor
+ *
+ * @return 0 on success. On failure, a negative error code is
+ * returned.
+ *
+ * @see @c select() in POSIX.1-2001,
+ * http://pubs.opengroup.org/onlinepubs/007908799/xsh/select.html
+ */
+int rtdm_select_handler(struct rtdm_fd *fd, struct xnselector *selector,
+			unsigned int type, unsigned int index);
+
+/**
+ * Memory mapping handler
+ *
+ * @param[in] fd File descriptor
+ * @param[in] vma Virtual memory area descriptor
+ *
+ * @return 0 on success. On failure, a negative error code is
+ * returned.
+ *
+ * @see @c mmap() in POSIX.1-2001,
+ * http://pubs.opengroup.org/onlinepubs/7908799/xsh/mmap.html
+ *
+ * @note The address hint passed to the mmap() request is deliberately
+ * ignored by RTDM.
+ */
+int rtdm_mmap_handler(struct rtdm_fd *fd, struct vm_area_struct *vma);
+
+/**
+ * Allocate mapping region in address space
+ *
+ * When present, this optional handler should return the start address
+ * of a free region in the process's address space, large enough to
+ * cover the ongoing mmap() operation. If unspecified, the default
+ * architecture-defined handler is invoked.
+ *
+ * Most drivers can omit this handler, except on MMU-less platforms
+ * (see second note).
+ *
+ * @param[in] fd File descriptor
+ * @param[in] len Length of the requested region
+ * @param[in] pgoff Page frame number to map to (see second note).
+ * @param[in] flags Requested mapping flags
+ *
+ * @return The start address of the mapping region on success. On
+ * failure, a negative error code should be returned, with -ENOSYS
+ * meaning that the driver does not want to provide such information,
+ * in which case the ongoing mmap() operation will fail.
+ *
+ * @note The address hint passed to the mmap() request is deliberately
+ * ignored by RTDM, and therefore not passed to this handler.
+ *
+ * @note On MMU-less platforms, this handler is required because RTDM
+ * issues mapping requests over a shareable character device
+ * internally. In that context, the RTDM core may pass a zero @a pgoff
+ * argument to the handler to probe for the logical start address
+ * of the memory region to map to. Otherwise, when @a pgoff is
+ * non-zero, pgoff << PAGE_SHIFT is usually returned.
+ */
+unsigned long
+rtdm_get_unmapped_area_handler(struct rtdm_fd *fd,
+			       unsigned long len, unsigned long pgoff,
+			       unsigned long flags);
+/**
+ * @anchor rtdm_fd_ops
+ * @brief RTDM file operation descriptor.
+ *
+ * This structure describes the operations available with an RTDM
+ * device, defining handlers for submitting I/O requests. Those
+ * handlers are implemented by RTDM device drivers.
+ */
+struct rtdm_fd_ops {
+	/** See rtdm_open_handler(). */
+	int (*open)(struct rtdm_fd *fd, int oflags);
+	/** See rtdm_socket_handler(). */
+	int (*socket)(struct rtdm_fd *fd, int protocol);
+	/** See rtdm_close_handler(). */
+	void (*close)(struct rtdm_fd *fd);
+	/** See rtdm_ioctl_handler(). */
+	int (*ioctl_rt)(struct rtdm_fd *fd,
+			unsigned int request, void __user *arg);
+	/** See rtdm_ioctl_handler(). */
+	int (*ioctl_nrt)(struct rtdm_fd *fd,
+			 unsigned int request, void __user *arg);
+	/** See rtdm_read_handler(). */
+	ssize_t (*read_rt)(struct rtdm_fd *fd,
+			   void __user *buf, size_t size);
+	/** See rtdm_read_handler(). */
+	ssize_t (*read_nrt)(struct rtdm_fd *fd,
+			    void __user *buf, size_t size);
+	/** See rtdm_write_handler(). */
+	ssize_t (*write_rt)(struct rtdm_fd *fd,
+			    const void __user *buf, size_t size);
+	/** See rtdm_write_handler(). */
+	ssize_t (*write_nrt)(struct rtdm_fd *fd,
+			     const void __user *buf, size_t size);
+	/** See rtdm_recvmsg_handler(). */
+	ssize_t (*recvmsg_rt)(struct rtdm_fd *fd,
+			      struct user_msghdr *msg, int flags);
+	/** See rtdm_recvmsg_handler(). */
+	ssize_t (*recvmsg_nrt)(struct rtdm_fd *fd,
+			       struct user_msghdr *msg, int flags);
+	/** See rtdm_sendmsg_handler(). */
+	ssize_t (*sendmsg_rt)(struct rtdm_fd *fd,
+			      const struct user_msghdr *msg, int flags);
+	/** See rtdm_sendmsg_handler(). */
+	ssize_t (*sendmsg_nrt)(struct rtdm_fd *fd,
+			       const struct user_msghdr *msg, int flags);
+	/** See rtdm_select_handler(). */
+	int (*select)(struct rtdm_fd *fd,
+		      struct xnselector *selector,
+		      unsigned int type, unsigned int index);
+	/** See rtdm_mmap_handler(). */
+	int (*mmap)(struct rtdm_fd *fd,
+		    struct vm_area_struct *vma);
+	/** See rtdm_get_unmapped_area_handler(). */
+	unsigned long (*get_unmapped_area)(struct rtdm_fd *fd,
+					   unsigned long len,
+					   unsigned long pgoff,
+					   unsigned long flags);
+};
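+
+/*
+ * Usage sketch (illustrative only): a minimal operation descriptor for a
+ * named device, wiring a few of the handlers documented above.
+ * foo_open(), foo_close() and foo_ioctl_rt() are hypothetical driver
+ * handlers; unset handlers are simply left NULL.
+ *
+ *	static struct rtdm_fd_ops foo_fd_ops = {
+ *		.open = foo_open,
+ *		.close = foo_close,
+ *		.ioctl_rt = foo_ioctl_rt,
+ *	};
+ */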
+
+/** @} File operation handlers */
+
+struct rtdm_fd {
+	unsigned int magic;
+	struct rtdm_fd_ops *ops;
+	struct cobalt_ppd *owner;
+	unsigned int refs;
+	int ufd;
+	int minor;
+	int oflags;
+#ifdef CONFIG_XENO_ARCH_SYS3264
+	int compat;
+#endif
+	bool stale;
+	struct list_head cleanup;
+	struct list_head next;	/* in dev->openfd_list */
+};
+
+#define RTDM_FD_MAGIC 0x52544446
+
+#define RTDM_FD_COMPAT	__COBALT_COMPAT_BIT
+#define RTDM_FD_COMPATX	__COBALT_COMPATX_BIT
+
+int __rtdm_anon_getfd(const char *name, int flags);
+
+void __rtdm_anon_putfd(int ufd);
+
+static inline struct cobalt_ppd *rtdm_fd_owner(const struct rtdm_fd *fd)
+{
+	return fd->owner;
+}
+
+static inline int rtdm_fd_ufd(const struct rtdm_fd *fd)
+{
+	return fd->ufd;
+}
+
+static inline int rtdm_fd_minor(const struct rtdm_fd *fd)
+{
+	return fd->minor;
+}
+
+static inline int rtdm_fd_flags(const struct rtdm_fd *fd)
+{
+	return fd->oflags;
+}
+
+#ifdef CONFIG_XENO_ARCH_SYS3264
+static inline int rtdm_fd_is_compat(const struct rtdm_fd *fd)
+{
+	return fd->compat;
+}
+#else
+static inline int rtdm_fd_is_compat(const struct rtdm_fd *fd)
+{
+	return 0;
+}
+#endif
+
+int rtdm_fd_enter(struct rtdm_fd *rtdm_fd, int ufd,
+		  unsigned int magic, struct rtdm_fd_ops *ops);
+
+int rtdm_fd_register(struct rtdm_fd *fd, int ufd);
+
+struct rtdm_fd *rtdm_fd_get(int ufd, unsigned int magic);
+
+int rtdm_fd_lock(struct rtdm_fd *fd);
+
+void rtdm_fd_put(struct rtdm_fd *fd);
+
+void rtdm_fd_unlock(struct rtdm_fd *fd);
+
+int rtdm_fd_fcntl(int ufd, int cmd, ...);
+
+int rtdm_fd_ioctl(int ufd, unsigned int request, ...);
+
+ssize_t rtdm_fd_read(int ufd, void __user *buf, size_t size);
+
+ssize_t rtdm_fd_write(int ufd, const void __user *buf, size_t size);
+
+int rtdm_fd_close(int ufd, unsigned int magic);
+
+ssize_t rtdm_fd_recvmsg(int ufd, struct user_msghdr *msg, int flags);
+
+int __rtdm_fd_recvmmsg(int ufd, void __user *u_msgvec, unsigned int vlen,
+		       unsigned int flags, void __user *u_timeout,
+		       int (*get_mmsg)(struct mmsghdr *mmsg, void __user *u_mmsg),
+		       int (*put_mmsg)(void __user **u_mmsg_p, const struct mmsghdr *mmsg),
+		       int (*get_timespec)(struct timespec64 *ts, const void __user *u_ts));
+
+int __rtdm_fd_recvmmsg64(int ufd, void __user *u_msgvec, unsigned int vlen,
+			 unsigned int flags, void __user *u_timeout,
+			 int (*get_mmsg)(struct mmsghdr *mmsg, void __user *u_mmsg),
+			 int (*put_mmsg)(void __user **u_mmsg_p, const struct mmsghdr *mmsg));
+
+ssize_t rtdm_fd_sendmsg(int ufd, const struct user_msghdr *msg,
+			int flags);
+
+int __rtdm_fd_sendmmsg(int ufd, void __user *u_msgvec, unsigned int vlen,
+		       unsigned int flags,
+		       int (*get_mmsg)(struct mmsghdr *mmsg, void __user *u_mmsg),
+		       int (*put_mmsg)(void __user **u_mmsg_p, const struct mmsghdr *mmsg));
+
+int rtdm_fd_mmap(int ufd, struct _rtdm_mmap_request *rma,
+		 void **u_addrp);
+
+int rtdm_fd_valid_p(int ufd);
+
+int rtdm_fd_select(int ufd, struct xnselector *selector,
+		   unsigned int type);
+
+int rtdm_device_new_fd(struct rtdm_fd *fd, int ufd,
+		struct rtdm_device *dev);
+
+void rtdm_device_flush_fds(struct rtdm_device *dev);
+
+void rtdm_fd_cleanup(struct cobalt_ppd *p);
+
+void rtdm_fd_init(void);
+
+#endif /* _COBALT_KERNEL_FD_H */
+++ linux-patched/include/xenomai/rtdm/cobalt.h	2022-03-21 12:58:31.849864973 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/include/xenomai/rtdm/analogy/rtdm_helpers.h	1970-01-01 01:00:00.000000000 +0100
+/*
+ * This file is part of the Xenomai project.
+ *
+ * Copyright (C) 2013 Philippe Gerum <rpm@xenomai.org>
+ *
+ * Xenomai is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+#ifndef _COBALT_RTDM_COBALT_H
+#define _COBALT_RTDM_COBALT_H
+
+#include <xenomai/posix/process.h>
+#include <xenomai/posix/extension.h>
+#include <xenomai/posix/thread.h>
+#include <xenomai/posix/signal.h>
+#include <xenomai/posix/timer.h>
+#include <xenomai/posix/clock.h>
+#include <xenomai/posix/event.h>
+#include <xenomai/posix/monitor.h>
+#include <xenomai/posix/corectl.h>
+
+#endif /* !_COBALT_RTDM_COBALT_H */
+++ linux-patched/include/xenomai/rtdm/analogy/rtdm_helpers.h	2022-03-21 12:58:31.842865041 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/include/xenomai/rtdm/analogy/subdevice.h	1970-01-01 01:00:00.000000000 +0100
+/*
+ * Analogy for Linux, Operation system facilities
+ *
+ * Copyright (C) 1997-2000 David A. Schleef <ds@schleef.org>
+ * Copyright (C) 2008 Alexis Berlemont <alexis.berlemont@free.fr>
+ *
+ * Xenomai is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * Xenomai is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Xenomai; if not, write to the Free Software Foundation,
+ * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+#ifndef _COBALT_RTDM_ANALOGY_RTDM_HELPERS_H
+#define _COBALT_RTDM_ANALOGY_RTDM_HELPERS_H
+
+#include <linux/fs.h>
+#include <linux/spinlock.h>
+#include <linux/sched.h>
+#include <linux/time.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/uaccess.h>
+#include <rtdm/driver.h>
+
+/* --- Trace section  --- */
+#define A4L_PROMPT "Analogy: "
+
+#define RTDM_SUBCLASS_ANALOGY 0
+
+#define __a4l_err(fmt, args...)  rtdm_printk(KERN_ERR A4L_PROMPT fmt, ##args)
+#define __a4l_warn(fmt, args...) rtdm_printk(KERN_WARNING A4L_PROMPT fmt, ##args)
+
+#ifdef  CONFIG_XENO_DRIVERS_ANALOGY_DEBUG_FTRACE
+#define __a4l_info(fmt, args...) trace_printk(fmt, ##args)
+#else
+#define __a4l_info(fmt, args...) 						\
+        rtdm_printk(KERN_INFO A4L_PROMPT "%s: " fmt, __FUNCTION__, ##args)
+#endif
+
+#ifdef CONFIG_XENO_DRIVERS_ANALOGY_DEBUG
+#ifdef CONFIG_XENO_DRIVERS_ANALOGY_DEBUG_FTRACE
+#define __a4l_dbg(level, debug, fmt, args...)				\
+	do {								\
+	if ((debug) >= (level))						\
+		trace_printk(fmt, ##args); 				\
+	} while (0)
+#else
+#define __a4l_dbg(level, debug, fmt, args...)						\
+	do {										\
+	if ((debug) >= (level))								\
+		rtdm_printk(KERN_DEBUG A4L_PROMPT "%s: " fmt, __FUNCTION__ , ##args);	\
+	} while (0)
+#endif
+
+#define core_dbg CONFIG_XENO_DRIVERS_ANALOGY_DEBUG_LEVEL
+#define drv_dbg CONFIG_XENO_DRIVERS_ANALOGY_DRIVER_DEBUG_LEVEL
+
+#else /* !CONFIG_XENO_DRIVERS_ANALOGY_DEBUG */
+
+#define __a4l_dbg(level, debug, fmt, args...)
+
+#endif /* CONFIG_XENO_DRIVERS_ANALOGY_DEBUG */
+
+#define __a4l_dev_name(dev) 						\
+	(dev->driver == NULL) ? "unattached dev" : dev->driver->board_name
+
+#define a4l_err(dev, fmt, args...) 					\
+	__a4l_err("%s: " fmt, __a4l_dev_name(dev), ##args)
+
+#define a4l_warn(dev, fmt, args...) 					\
+	__a4l_warn("%s: " fmt, __a4l_dev_name(dev), ##args)
+
+#define a4l_info(dev, fmt, args...) 					\
+	__a4l_info("%s: " fmt, __a4l_dev_name(dev), ##args)
+
+#define a4l_dbg(level, debug, dev, fmt, args...)			\
+	__a4l_dbg(level, debug, "%s: " fmt, __a4l_dev_name(dev), ##args)
+
+
+/* --- Time section --- */
+static inline void a4l_udelay(unsigned int us)
+{
+	rtdm_task_busy_sleep(((nanosecs_rel_t) us) * 1000);
+}
+
+/* Function which gives absolute time */
+nanosecs_abs_t a4l_get_time(void);
+
+/* Function for setting up the absolute time recovery */
+void a4l_init_time(void);
+
+/* --- IRQ section --- */
+#define A4L_IRQ_DISABLED 0
+
+typedef int (*a4l_irq_hdlr_t) (unsigned int irq, void *d);
+
+struct a4l_irq_descriptor {
+	/* These fields are needed to launch the IRQ trampoline;
+	   that is why a dedicated structure is defined */
+	a4l_irq_hdlr_t handler;
+	unsigned int irq;
+	void *cookie;
+	rtdm_irq_t rtdm_desc;
+};
+
+int __a4l_request_irq(struct a4l_irq_descriptor * dsc,
+		      unsigned int irq,
+		      a4l_irq_hdlr_t handler,
+		      unsigned long flags, void *cookie);
+int __a4l_free_irq(struct a4l_irq_descriptor * dsc);
+
+/* --- Synchronization section --- */
+#define __NRT_WAITER 1
+#define __RT_WAITER 2
+#define __EVT_PDING 3
+
+struct a4l_sync {
+	unsigned long status;
+	rtdm_event_t rtdm_evt;
+	rtdm_nrtsig_t nrt_sig;
+	wait_queue_head_t wq;
+};
+
+#define a4l_select_sync(snc, slr, type, fd) \
+	rtdm_event_select(&((snc)->rtdm_evt), slr, type, fd)
+
+int a4l_init_sync(struct a4l_sync * snc);
+void a4l_cleanup_sync(struct a4l_sync * snc);
+void a4l_flush_sync(struct a4l_sync * snc);
+int a4l_wait_sync(struct a4l_sync * snc, int rt);
+int a4l_timedwait_sync(struct a4l_sync * snc,
+		       int rt, unsigned long long ns_timeout);
+void a4l_signal_sync(struct a4l_sync * snc);
+
+#endif /* !_COBALT_RTDM_ANALOGY_RTDM_HELPERS_H */
+++ linux-patched/include/xenomai/rtdm/analogy/subdevice.h	2022-03-21 12:58:31.835865109 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/include/xenomai/rtdm/analogy/context.h	1970-01-01 01:00:00.000000000 +0100
+/**
+ * @file
+ * Analogy for Linux, subdevice related features
+ *
+ * Copyright (C) 1997-2000 David A. Schleef <ds@schleef.org>
+ * Copyright (C) 2008 Alexis Berlemont <alexis.berlemont@free.fr>
+ *
+ * Xenomai is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * Xenomai is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Xenomai; if not, write to the Free Software Foundation,
+ * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+#ifndef _COBALT_RTDM_ANALOGY_SUBDEVICE_H
+#define _COBALT_RTDM_ANALOGY_SUBDEVICE_H
+
+#include <linux/list.h>
+#include <rtdm/analogy/instruction.h>
+#include <rtdm/analogy/command.h>
+#include <rtdm/analogy/channel_range.h>
+
+/* --- Subdevice descriptor structure --- */
+
+struct a4l_device;
+struct a4l_buffer;
+
+/*!
+ * @brief Structure describing the subdevice
+ * @see a4l_add_subd()
+ */
+
+struct a4l_subdevice {
+
+	struct list_head list;
+			   /**< List stuff */
+
+	struct a4l_device *dev;
+			       /**< Containing device */
+
+	unsigned int idx;
+		      /**< Subdevice index */
+
+	struct a4l_buffer *buf;
+			       /**< Linked buffer */
+
+	/* Subdevice's status (busy, linked?) */
+	unsigned long status;
+			     /**< Subdevice's status */
+
+	/* Descriptors stuff */
+	unsigned long flags;
+			 /**< Type flags */
+	struct a4l_channels_desc *chan_desc;
+				/**< Tab of channels descriptors pointers */
+	struct a4l_rngdesc *rng_desc;
+				/**< Tab of ranges descriptors pointers */
+	struct a4l_cmd_desc *cmd_mask;
+			    /**< Command capabilities mask */
+
+	/* Functions stuff */
+	int (*insn_read) (struct a4l_subdevice *, struct a4l_kernel_instruction *);
+							/**< Callback for the instruction "read" */
+	int (*insn_write) (struct a4l_subdevice *, struct a4l_kernel_instruction *);
+							 /**< Callback for the instruction "write" */
+	int (*insn_bits) (struct a4l_subdevice *, struct a4l_kernel_instruction *);
+							/**< Callback for the instruction "bits" */
+	int (*insn_config) (struct a4l_subdevice *, struct a4l_kernel_instruction *);
+							  /**< Callback for the configuration instruction */
+	int (*do_cmd) (struct a4l_subdevice *, struct a4l_cmd_desc *);
+					/**< Callback for command handling */
+	int (*do_cmdtest) (struct a4l_subdevice *, struct a4l_cmd_desc *);
+						       /**< Callback for command checking */
+	void (*cancel) (struct a4l_subdevice *);
+					 /**< Callback for asynchronous transfer cancellation */
+	void (*munge) (struct a4l_subdevice *, void *, unsigned long);
+								/**< Callback for munge operation */
+	int (*trigger) (struct a4l_subdevice *, lsampl_t);
+					      /**< Callback for trigger operation */
+
+	char priv[0];
+		  /**< Private data */
+};
+
+/* --- Subdevice related functions and macros --- */
+
+struct a4l_channel *a4l_get_chfeat(struct a4l_subdevice * sb, int idx);
+struct a4l_range *a4l_get_rngfeat(struct a4l_subdevice * sb, int chidx, int rngidx);
+int a4l_check_chanlist(struct a4l_subdevice * subd,
+		       unsigned char nb_chan, unsigned int *chans);
+
+#define a4l_subd_is_input(x) ((A4L_SUBD_MASK_READ & (x)->flags) != 0)
+/* The following macro considers a DIO subdevice to be primarily an
+   output subdevice */
+#define a4l_subd_is_output(x) \
+	((A4L_SUBD_MASK_WRITE & (x)->flags) != 0 || \
+	 (A4L_SUBD_DIO & (x)->flags) != 0)
+
+/* --- Upper layer functions --- */
+
+struct a4l_subdevice * a4l_get_subd(struct a4l_device *dev, int idx);
+struct a4l_subdevice * a4l_alloc_subd(int sizeof_priv,
+			    void (*setup)(struct a4l_subdevice *));
+int a4l_add_subd(struct a4l_device *dev, struct a4l_subdevice * subd);
+int a4l_ioctl_subdinfo(struct a4l_device_context * cxt, void *arg);
+int a4l_ioctl_chaninfo(struct a4l_device_context * cxt, void *arg);
+int a4l_ioctl_rnginfo(struct a4l_device_context * cxt, void *arg);
+int a4l_ioctl_nbchaninfo(struct a4l_device_context * cxt, void *arg);
+int a4l_ioctl_nbrnginfo(struct a4l_device_context * cxt, void *arg);
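+
+/*
+ * A minimal sketch of how an attach procedure typically creates an
+ * analog input subdevice, assuming a hypothetical "foo" driver (the
+ * foo_* names are placeholders): the setup callback fills in the
+ * descriptors, then the subdevice is appended to the device:
+ *
+ *   static void foo_setup_ai(struct a4l_subdevice *subd)
+ *   {
+ *           subd->flags = A4L_SUBD_AI;
+ *           subd->chan_desc = &foo_chandesc;
+ *           subd->rng_desc = &a4l_range_bipolar10;
+ *           subd->insn_read = foo_ai_insn_read;
+ *   }
+ *
+ *   subd = a4l_alloc_subd(sizeof(struct foo_ai_priv), foo_setup_ai);
+ *   if (subd == NULL)
+ *           return -ENOMEM;
+ *   return a4l_add_subd(dev, subd);
+ */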
+
+#endif /* !_COBALT_RTDM_ANALOGY_SUBDEVICE_H */
+++ linux-patched/include/xenomai/rtdm/analogy/context.h	2022-03-21 12:58:31.827865187 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/include/xenomai/rtdm/analogy/instruction.h	1970-01-01 01:00:00.000000000 +0100
+/*
+ * Analogy for Linux, context structure / macros declarations
+ *
+ * Copyright (C) 1997-2000 David A. Schleef <ds@schleef.org>
+ * Copyright (C) 2008 Alexis Berlemont <alexis.berlemont@free.fr>
+ *
+ * Xenomai is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * Xenomai is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Xenomai; if not, write to the Free Software Foundation,
+ * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+#ifndef _COBALT_RTDM_ANALOGY_CONTEXT_H
+#define _COBALT_RTDM_ANALOGY_CONTEXT_H
+
+#include <rtdm/driver.h>
+
+struct a4l_device;
+struct a4l_buffer;
+
+struct a4l_device_context {
+	/* The corresponding device pointer
+	   (retrieved from the minor number at open time) */
+	struct a4l_device *dev;
+
+	/* The buffer structure contains everything to transfer data
+	   from asynchronous acquisition operations on a specific
+	   subdevice */
+	struct a4l_buffer *buffer;
+};
+
+static inline int a4l_get_minor(struct a4l_device_context *cxt)
+{
+	/* Get a pointer to the containing RTDM file descriptor */
+	struct rtdm_fd *fd = rtdm_private_to_fd(cxt);
+	/* Get the minor index */
+	return rtdm_fd_minor(fd);
+}
+
+#endif /* !_COBALT_RTDM_ANALOGY_CONTEXT_H */
+++ linux-patched/include/xenomai/rtdm/analogy/instruction.h	2022-03-21 12:58:31.820865256 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/include/xenomai/rtdm/analogy/channel_range.h	1970-01-01 01:00:00.000000000 +0100
+/*
+ * Analogy for Linux, instruction related features
+ *
+ * Copyright (C) 1997-2000 David A. Schleef <ds@schleef.org>
+ * Copyright (C) 2008 Alexis Berlemont <alexis.berlemont@free.fr>
+ *
+ * Xenomai is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * Xenomai is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Xenomai; if not, write to the Free Software Foundation,
+ * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+#ifndef _COBALT_RTDM_ANALOGY_INSTRUCTION_H
+#define _COBALT_RTDM_ANALOGY_INSTRUCTION_H
+
+struct a4l_kernel_instruction {
+	unsigned int type;
+	unsigned int idx_subd;
+	unsigned int chan_desc;
+	unsigned int data_size;
+	void *data;
+	void *__udata;
+};
+
+struct a4l_kernel_instruction_list {
+	unsigned int count;
+	struct a4l_kernel_instruction *insns;
+	a4l_insn_t *__uinsns;
+};
+
+/* Instruction related functions */
+
+/* Upper layer functions */
+int a4l_ioctl_insnlist(struct a4l_device_context * cxt, void *arg);
+int a4l_ioctl_insn(struct a4l_device_context * cxt, void *arg);
+
+#endif /* !_COBALT_RTDM_ANALOGY_INSTRUCTION_H */
+++ linux-patched/include/xenomai/rtdm/analogy/channel_range.h	2022-03-21 12:58:31.812865334 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/include/xenomai/rtdm/analogy/driver.h	1970-01-01 01:00:00.000000000 +0100
+/**
+ * @file
+ * Analogy for Linux, channel, range related features
+ *
+ * Copyright (C) 1997-2000 David A. Schleef <ds@schleef.org>
+ * Copyright (C) 2008 Alexis Berlemont <alexis.berlemont@free.fr>
+ *
+ * Xenomai is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * Xenomai is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Xenomai; if not, write to the Free Software Foundation,
+ * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+#ifndef _COBALT_RTDM_ANALOGY_CHANNEL_RANGE_H
+#define _COBALT_RTDM_ANALOGY_CHANNEL_RANGE_H
+
+#include <rtdm/uapi/analogy.h>
+
+/**
+ * @ingroup analogy_driver_facilities
+ * @defgroup analogy_channel_range Channels and ranges
+ *
+ * Channels
+ *
+ * According to the Analogy nomenclature, the channel is the elementary
+ * acquisition entity. One channel acquires one sample at a time. A
+ * channel can be:
+ * - an analog input or an analog output;
+ * - a digital input or a digital output;
+ *
+ * Channels are defined by their type and by some other
+ * characteristics like:
+ * - their resolutions for analog channels (which usually range from
+     8 to 32 bits);
+ * - their references;
+ *
+ * Such parameters must be declared for each channel composing a
+ * subdevice. The structure a4l_channel (struct a4l_channel) is used to
+ * define one channel.
+ *
+ * Another structure named a4l_channels_desc (struct a4l_channels_desc)
+ * gathers all the channels of a specific subdevice. The latter
+ * structure also stores:
+ * - the channels count;
+ * - the channels declaration mode (A4L_CHAN_GLOBAL_CHANDESC or
+     A4L_CHAN_PERCHAN_CHANDESC): if all the channels composing a
+     subdevice are identical, there is no need to declare the
+     parameters for each channel; the global declaration mode eases
+     the structure composition.
+ *
+ * Usually the channels descriptor looks like this:
+ * <tt> @verbatim
+struct a4l_channels_desc example_chan = {
+	mode: A4L_CHAN_GLOBAL_CHANDESC, -> Global declaration
+					      mode is set
+	length: 8, -> 8 channels
+	chans: {
+		{A4L_CHAN_AREF_GROUND, 16}, -> Each channel is 16 bits
+						  wide with the ground as
+						  reference
+	},
+};
+@endverbatim </tt>
+ *
+ * Ranges
+ *
+ * So as to perform conversion from logical values acquired by the
+ * device to physical units, some range structure(s) must be declared
+ * on the driver side.
+ *
+ * Such structures contain:
+ * - the physical unit type (Volt, Ampere, none);
+ * - the minimal and maximal values;
+ *
+ * These range structures must be associated with the channels at
+ * subdevice registration time as a channel can work with many
+ * ranges. At configuration time (thanks to an Analogy command), one
+ * range will be selected for each enabled channel.
+ *
+ * Consequently, for each channel, the developer must declare all the
+ * possible ranges in a structure called struct a4l_rngtab. Here is an
+ * example:
+ * <tt> @verbatim
+struct a4l_rngtab example_tab = {
+    length: 2,
+    rngs: {
+	RANGE_V(-5,5),
+	RANGE_V(-10,10),
+    },
+};
+@endverbatim </tt>
+ *
+ * For each subdevice, a specific structure is designed to gather all
+ * the ranges tabs of all the channels. In this structure, called
+ * struct a4l_rngdesc, three fields must be filled:
+ * - the declaration mode (A4L_RNG_GLOBAL_RNGDESC or
+ *   A4L_RNG_PERCHAN_RNGDESC);
+ * - the number of ranges tabs;
+ * - the tab of pointers to the ranges tabs;
+ *
+ * Most of the time, the channels which belong to the same subdevice
+ * use the same set of ranges. So, there is no need to declare the
+ * same ranges for each channel. A macro is defined to prevent
+ * redundant declarations: RNG_GLOBAL().
+ *
+ * Here is an example:
+ * <tt> @verbatim
+struct a4l_rngdesc example_rng = RNG_GLOBAL(example_tab);
+@endverbatim </tt>
+ *
+ * @{
+ */
+
+
+/* --- Channel section --- */
+
+/*!
+ * @anchor A4L_CHAN_AREF_xxx @name Channel reference
+ * @brief Flags to define the channel's reference
+ * @{
+ */
+
+/**
+ * Ground reference
+ */
+#define A4L_CHAN_AREF_GROUND 0x1
+/**
+ * Common reference
+ */
+#define A4L_CHAN_AREF_COMMON 0x2
+/**
+ * Differential reference
+ */
+#define A4L_CHAN_AREF_DIFF 0x4
+/**
+ * Misc reference
+ */
+#define A4L_CHAN_AREF_OTHER 0x8
+
+	  /*! @} A4L_CHAN_AREF_xxx */
+
+/**
+ * Internal use flag (must not be used by driver developer)
+ */
+#define A4L_CHAN_GLOBAL 0x10
+
+/*!
+ * @brief Structure describing some channel's characteristics
+ */
+
+struct a4l_channel {
+	unsigned long flags; /*!< Channel flags to define the reference. */
+	unsigned long nb_bits; /*!< Channel resolution. */
+};
+
+/*!
+ * @anchor A4L_CHAN_xxx @name Channels declaration mode
+ * @brief Constant to define whether the channels in a descriptor are
+ * identical
+ * @{
+ */
+
+/**
+ * Global declaration, the set contains channels with similar
+ * characteristics
+ */
+#define A4L_CHAN_GLOBAL_CHANDESC 0
+/**
+ * Per channel declaration, the descriptor gathers different channels
+ */
+#define A4L_CHAN_PERCHAN_CHANDESC 1
+
+	  /*! @} A4L_CHAN_xxx */
+
+/*!
+ * @brief Structure describing a channels set
+ */
+
+struct a4l_channels_desc {
+	unsigned long mode; /*!< Declaration mode (global or per channel) */
+	unsigned long length; /*!< Channels count */
+	struct a4l_channel chans[]; /*!< Channels tab */
+};
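+
+/*
+ * A minimal sketch of a per-channel declaration, assuming a
+ * hypothetical subdevice mixing one 12-bit and one 16-bit channel;
+ * unlike the global mode, each entry of chans[] describes one
+ * specific channel:
+ *
+ *   struct a4l_channels_desc mixed_chandesc = {
+ *           .mode = A4L_CHAN_PERCHAN_CHANDESC,
+ *           .length = 2,
+ *           .chans = {
+ *                   {A4L_CHAN_AREF_GROUND, 12},
+ *                   {A4L_CHAN_AREF_DIFF, 16},
+ *           },
+ *   };
+ */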
+
+/**
+ * Internal use flag (must not be used by driver developer)
+ */
+#define A4L_RNG_GLOBAL 0x8
+
+/*!
+ * @brief Structure describing a (unique) range
+ */
+
+struct a4l_range {
+	long min; /*!< Minimal value */
+	long max; /*!< Maximal value */
+	unsigned long flags; /*!< Range flags (unit, etc.) */
+};
+
+/**
+ * Macro to declare a (unique) range with no unit defined
+ */
+#define RANGE(x,y) {(x * A4L_RNG_FACTOR), (y * A4L_RNG_FACTOR),	\
+			A4L_RNG_NO_UNIT}
+/**
+ * Macro to declare a (unique) range in Volt
+ */
+#define RANGE_V(x,y) {(x * A4L_RNG_FACTOR),(y * A4L_RNG_FACTOR), \
+			A4L_RNG_VOLT_UNIT}
+/**
+ * Macro to declare a (unique) range in milliAmpere
+ */
+#define RANGE_mA(x,y) {(x * A4L_RNG_FACTOR),(y * A4L_RNG_FACTOR), \
+			A4L_RNG_MAMP_UNIT}
+/**
+ * Macro to declare a (unique) range in some external reference
+ */
+#define RANGE_ext(x,y) {(x * A4L_RNG_FACTOR),(y * A4L_RNG_FACTOR), \
+			A4L_RNG_EXT_UNIT}
+
+
+/* Ranges tab descriptor */
+struct a4l_rngtab {
+	unsigned char length;
+	struct a4l_range rngs[];
+};
+
+/**
+ * Constant to define a ranges descriptor as global (inter-channel)
+ */
+#define A4L_RNG_GLOBAL_RNGDESC 0
+/**
+ * Constant to define a ranges descriptor as specific for a channel
+ */
+#define A4L_RNG_PERCHAN_RNGDESC 1
+
+/* Global ranges descriptor */
+struct a4l_rngdesc {
+	unsigned char mode;
+	unsigned char length;
+	struct a4l_rngtab *rngtabs[];
+};
+
+/**
+ * Macro to declare a ranges global descriptor in one line
+ */
+#define RNG_GLOBAL(x) {			\
+	.mode = A4L_RNG_GLOBAL_RNGDESC,	\
+	.length =  1,			\
+	.rngtabs = {&(x)},		\
+}
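+
+/*
+ * A minimal sketch of the per-channel variant, assuming two
+ * hypothetical tabs tab_ai0 and tab_ai1 (both struct a4l_rngtab);
+ * one ranges tab is provided per channel instead of a single shared
+ * one:
+ *
+ *   struct a4l_rngdesc foo_perchan_rng = {
+ *           .mode = A4L_RNG_PERCHAN_RNGDESC,
+ *           .length = 2,
+ *           .rngtabs = {&tab_ai0, &tab_ai1},
+ *   };
+ */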
+
+extern struct a4l_rngdesc a4l_range_bipolar10;
+extern struct a4l_rngdesc a4l_range_bipolar5;
+extern struct a4l_rngdesc a4l_range_unipolar10;
+extern struct a4l_rngdesc a4l_range_unipolar5;
+extern struct a4l_rngdesc a4l_range_unknown;
+extern struct a4l_rngdesc a4l_range_fake;
+
+#define range_digital a4l_range_unipolar5
+
+/*! @} channelrange */
+
+#endif /* !_COBALT_RTDM_ANALOGY_CHANNEL_RANGE_H */
+++ linux-patched/include/xenomai/rtdm/analogy/driver.h	2022-03-21 12:58:31.805865402 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/include/xenomai/rtdm/analogy/device.h	1970-01-01 01:00:00.000000000 +0100
+/**
+ * @file
+ * Analogy for Linux, driver facilities
+ *
+ * Copyright (C) 1997-2000 David A. Schleef <ds@schleef.org>
+ * Copyright (C) 2008 Alexis Berlemont <alexis.berlemont@free.fr>
+ *
+ * Xenomai is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * Xenomai is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Xenomai; if not, write to the Free Software Foundation,
+ * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+#ifndef _COBALT_RTDM_ANALOGY_DRIVER_H
+#define _COBALT_RTDM_ANALOGY_DRIVER_H
+
+#include <linux/list.h>
+#include <rtdm/analogy/rtdm_helpers.h>
+#include <rtdm/analogy/context.h>
+#include <rtdm/analogy/buffer.h>
+
+struct seq_file;
+struct a4l_link_desc;
+struct a4l_device;
+
+/** Structure containing driver declaration data.
+ *
+ *  @see rt_task_inquire()
+ */
+/* Analogy driver descriptor */
+struct a4l_driver {
+
+	/* List stuff */
+	struct list_head list;
+			   /**< List stuff */
+
+	/* Visible description stuff */
+	struct module *owner;
+	               /**< Pointer to module containing the code */
+	unsigned int flags;
+	               /**< Driver type / status flags */
+	char *board_name;
+		       /**< Board name */
+	char *driver_name;
+	               /**< Driver name */
+	int privdata_size;
+		       /**< Size of the driver's private data */
+
+	/* Init/destroy procedures */
+	int (*attach) (struct a4l_device *, struct a4l_link_desc *);
+								      /**< Attach procedure */
+	int (*detach) (struct a4l_device *);
+				   /**< Detach procedure */
+
+};
+
+/* Driver list related functions */
+
+int a4l_register_drv(struct a4l_driver * drv);
+int a4l_unregister_drv(struct a4l_driver * drv);
+int a4l_lct_drv(char *pin, struct a4l_driver ** pio);
+#ifdef CONFIG_PROC_FS
+int a4l_rdproc_drvs(struct seq_file *p, void *data);
+#endif /* CONFIG_PROC_FS */
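+
+/*
+ * A minimal registration sketch, assuming a hypothetical "foo"
+ * driver providing its own attach/detach procedures; the descriptor
+ * is registered at module load time and unregistered on removal:
+ *
+ *   static struct a4l_driver foo_drv = {
+ *           .owner = THIS_MODULE,
+ *           .board_name = "foo_board",
+ *           .driver_name = "foo",
+ *           .attach = foo_attach,
+ *           .detach = foo_detach,
+ *           .privdata_size = sizeof(struct foo_priv),
+ *   };
+ *
+ *   static int __init foo_init(void)
+ *   {
+ *           return a4l_register_drv(&foo_drv);
+ *   }
+ *
+ *   static void __exit foo_exit(void)
+ *   {
+ *           a4l_unregister_drv(&foo_drv);
+ *   }
+ */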
+
+#endif /* !_COBALT_RTDM_ANALOGY_DRIVER_H */
+++ linux-patched/include/xenomai/rtdm/analogy/device.h	2022-03-21 12:58:31.797865480 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/include/xenomai/rtdm/analogy/buffer.h	1970-01-01 01:00:00.000000000 +0100
+/*
+ * Analogy for Linux, device related features
+ *
+ * Copyright (C) 1997-2000 David A. Schleef <ds@schleef.org>
+ * Copyright (C) 2008 Alexis Berlemont <alexis.berlemont@free.fr>
+ *
+ * Xenomai is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * Xenomai is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Xenomai; if not, write to the Free Software Foundation,
+ * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+#ifndef _COBALT_RTDM_ANALOGY_DEVICE_H
+#define _COBALT_RTDM_ANALOGY_DEVICE_H
+
+#include <rtdm/analogy/rtdm_helpers.h>
+#include <rtdm/analogy/transfer.h>
+#include <rtdm/analogy/driver.h>
+
+#define A4L_NB_DEVICES 10
+
+#define A4L_DEV_ATTACHED_NR 0
+
+struct a4l_device {
+
+	/* Spinlock for global device use */
+	rtdm_lock_t lock;
+
+	/* Device specific flags */
+	unsigned long flags;
+
+	/* Driver assigned to this device by the attach
+	   procedure */
+	struct a4l_driver *driver;
+
+	/* Hidden description stuff */
+	struct list_head subdvsq;
+
+	/* Context-dependent stuff */
+	struct a4l_transfer transfer;
+
+	/* Private data for the driver's own use */
+	void *priv;
+};
+
+/* --- Devices tab related functions --- */
+void a4l_init_devs(void);
+int a4l_check_cleanup_devs(void);
+int a4l_rdproc_devs(struct seq_file *p, void *data);
+
+/* --- Context related function / macro --- */
+void a4l_set_dev(struct a4l_device_context *cxt);
+#define a4l_get_dev(x) ((x)->dev)
+
+/* --- Upper layer functions --- */
+int a4l_ioctl_devcfg(struct a4l_device_context * cxt, void *arg);
+int a4l_ioctl_devinfo(struct a4l_device_context * cxt, void *arg);
+
+#endif /* !_COBALT_RTDM_ANALOGY_DEVICE_H */
+++ linux-patched/include/xenomai/rtdm/analogy/buffer.h	2022-03-21 12:58:31.790865548 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/include/xenomai/rtdm/analogy/command.h	1970-01-01 01:00:00.000000000 +0100
+/*
+ * Analogy for Linux, buffer related features
+ *
+ * Copyright (C) 1997-2000 David A. Schleef <ds@schleef.org>
+ * Copyright (C) 2008 Alexis Berlemont <alexis.berlemont@free.fr>
+ *
+ * Xenomai is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * Xenomai is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Xenomai; if not, write to the Free Software Foundation,
+ * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+#ifndef _COBALT_RTDM_ANALOGY_BUFFER_H
+#define _COBALT_RTDM_ANALOGY_BUFFER_H
+
+#include <linux/version.h>
+#include <linux/mm.h>
+#include <rtdm/driver.h>
+#include <rtdm/uapi/analogy.h>
+#include <rtdm/analogy/rtdm_helpers.h>
+#include <rtdm/analogy/context.h>
+#include <rtdm/analogy/command.h>
+#include <rtdm/analogy/subdevice.h>
+
+/* --- Events bits / flags --- */
+
+#define A4L_BUF_EOBUF_NR 0
+#define A4L_BUF_EOBUF (1 << A4L_BUF_EOBUF_NR)
+
+#define A4L_BUF_ERROR_NR 1
+#define A4L_BUF_ERROR (1 << A4L_BUF_ERROR_NR)
+
+#define A4L_BUF_EOA_NR 2
+#define A4L_BUF_EOA (1 << A4L_BUF_EOA_NR)
+
+/* --- Status bits / flags --- */
+
+#define A4L_BUF_BULK_NR 8
+#define A4L_BUF_BULK (1 << A4L_BUF_BULK_NR)
+
+#define A4L_BUF_MAP_NR 9
+#define A4L_BUF_MAP (1 << A4L_BUF_MAP_NR)
+
+
+/* Buffer descriptor structure */
+struct a4l_buffer {
+
+	/* Subdevice the buffer is attached to */
+	struct a4l_subdevice *subd;
+
+	/* Buffer's first virtual page pointer */
+	void *buf;
+
+	/* Buffer's global size */
+	unsigned long size;
+	/* Tab containing buffer's pages pointers */
+	unsigned long *pg_list;
+
+	/* RT/NRT synchronization element */
+	struct a4l_sync sync;
+
+	/* Counters needed for transfer */
+	unsigned long end_count;
+	unsigned long prd_count;
+	unsigned long cns_count;
+	unsigned long tmp_count;
+
+	/* Status + events occurring during transfer */
+	unsigned long flags;
+
+	/* Command in progress */
+	struct a4l_cmd_desc *cur_cmd;
+
+	/* Munge counter */
+	unsigned long mng_count;
+
+	/* Threshold below which the user process should not be
+	   awakened */
+	unsigned long wake_count;
+};
+
+static inline void __dump_buffer_counters(struct a4l_buffer *buf)
+{
+	__a4l_dbg(1, core_dbg, "a4l_buffer=0x%p, p=0x%p \n", buf, buf->buf);
+	__a4l_dbg(1, core_dbg, "end=%06ld, prd=%06ld, cns=%06ld, tmp=%06ld \n",
+		buf->end_count, buf->prd_count, buf->cns_count, buf->tmp_count);
+}
+
+/* --- Static inline functions related with
+   user<->kernel data transfers --- */
+
+/* The function __produce is an inline function which copies data into
+   the asynchronous buffer, looping over contiguous chunks so as to
+   handle wrap-around. This function is used in read and write operations */
+static inline int __produce(struct a4l_device_context *cxt,
+			    struct a4l_buffer *buf, void *pin, unsigned long count)
+{
+	unsigned long start_ptr = (buf->prd_count % buf->size);
+	struct rtdm_fd *fd = rtdm_private_to_fd(cxt);
+	unsigned long tmp_cnt = count;
+	int ret = 0;
+
+	while (ret == 0 && tmp_cnt != 0) {
+		/* Check the data copy can be performed contiguously */
+		unsigned long blk_size = (start_ptr + tmp_cnt > buf->size) ?
+			buf->size - start_ptr : tmp_cnt;
+
+		/* Perform the copy */
+		if (cxt == NULL)
+			memcpy(buf->buf + start_ptr, pin, blk_size);
+		else
+			ret = rtdm_safe_copy_from_user(fd,
+						       buf->buf + start_ptr,
+						       pin, blk_size);
+
+		/* Update pointers/counts */
+		pin += blk_size;
+		tmp_cnt -= blk_size;
+		start_ptr = 0;
+	}
+
+	return ret;
+}
+
+/* The function __consume is an inline function which copies data from
+   the asynchronous buffer, looping over contiguous chunks so as to
+   handle wrap-around. This function is used in read and write operations */
+static inline int __consume(struct a4l_device_context *cxt,
+			    struct a4l_buffer *buf, void *pout, unsigned long count)
+{
+	unsigned long start_ptr = (buf->cns_count % buf->size);
+	struct rtdm_fd *fd = rtdm_private_to_fd(cxt);
+	unsigned long tmp_cnt = count;
+	int ret = 0;
+
+	while (ret == 0 && tmp_cnt != 0) {
+		/* Check the data copy can be performed contiguously */
+		unsigned long blk_size = (start_ptr + tmp_cnt > buf->size) ?
+			buf->size - start_ptr : tmp_cnt;
+
+		/* Perform the copy */
+		if (cxt == NULL)
+			memcpy(pout, buf->buf + start_ptr, blk_size);
+		else
+			ret = rtdm_safe_copy_to_user(fd,
+						     pout,
+						     buf->buf + start_ptr,
+						     blk_size);
+
+		/* Update pointers/counts */
+		pout += blk_size;
+		tmp_cnt -= blk_size;
+		start_ptr = 0;
+	}
+
+	return ret;
+}
+
+/* The function __munge is an inline function which calls the
+   subdevice specific munge callback on contiguous windows within the
+   whole buffer. This function is used in read and write operations */
+static inline void __munge(struct a4l_subdevice * subd,
+			   void (*munge) (struct a4l_subdevice *,
+					  void *, unsigned long),
+			   struct a4l_buffer * buf, unsigned long count)
+{
+	unsigned long start_ptr = (buf->mng_count % buf->size);
+	unsigned long tmp_cnt = count;
+
+	while (tmp_cnt != 0) {
+		/* Check the data copy can be performed contiguously */
+		unsigned long blk_size = (start_ptr + tmp_cnt > buf->size) ?
+			buf->size - start_ptr : tmp_cnt;
+
+		/* Perform the munge operation */
+		munge(subd, buf->buf + start_ptr, blk_size);
+
+		/* Update the start pointer and the count */
+		tmp_cnt -= blk_size;
+		start_ptr = 0;
+	}
+}
+
+/* The function __handle_event can only be called from process context
+   (not interrupt service routine). It allows the client process to
+   retrieve the buffer status which has been updated by the driver */
+static inline int __handle_event(struct a4l_buffer * buf)
+{
+	int ret = 0;
+
+	/* The event "End of acquisition" must not be cleared
+	   before the buffer has been completely flushed */
+	if (test_bit(A4L_BUF_EOA_NR, &buf->flags))
+		ret = -ENOENT;
+
+	if (test_bit(A4L_BUF_ERROR_NR, &buf->flags))
+		ret = -EPIPE;
+
+	return ret;
+}
+
+/* --- Counters management functions --- */
+
+/* Here, we may wonder why we need more than two counters / pointers.
+
+   Theoretically, we only need two counters (or two pointers):
+   - one which tells where the reader should be within the buffer
+   - one which tells where the writer should be within the buffer
+
+   With these two counters (or pointers), we just have to check that
+   the writer does not overtake the reader inside the ring buffer
+   BEFORE any read / write operations.
+
+   However, if one element is a DMA controller, we have to be more
+   careful. Generally a DMA transfer occurs like this:
+   DMA shot
+      |-> then DMA interrupt
+	 |-> then DMA soft handler which checks the counter
+
+   So, the checks occur AFTER the write operations.
+
+   Let's take an example: the reader is a software task and the writer
+   is a DMA controller. At the end of the DMA shot, the write counter
+   is higher than the read counter. Unfortunately, a read operation
+   occurs between the DMA shot and the DMA interrupt, so the handler
+   will not notice that an overflow occurred.
+
+   That is why tmp_count comes into play: tmp_count records the
+   read/consumer current counter before the next DMA shot and once the
+   next DMA shot is done, we check that the updated writer/producer
+   counter is not higher than tmp_count. Thus we are sure that the DMA
+   writer has not overtaken the reader because it was not able to
+   overtake the n-1 value. */
+
+static inline int __pre_abs_put(struct a4l_buffer * buf, unsigned long count)
+{
+	if (count - buf->tmp_count > buf->size) {
+		set_bit(A4L_BUF_ERROR_NR, &buf->flags);
+		return -EPIPE;
+	}
+
+	buf->tmp_count = buf->cns_count;
+
+	return 0;
+}
+
+static inline int __pre_put(struct a4l_buffer * buf, unsigned long count)
+{
+	return __pre_abs_put(buf, buf->tmp_count + count);
+}
+
+static inline int __pre_abs_get(struct a4l_buffer * buf, unsigned long count)
+{
+	/* The first time, we expect the buffer to be properly filled
+	before the trigger occurrence; in any case, tmp_count must have
+	been initialized, and it is updated right here */
+	if (buf->tmp_count == 0 || buf->cns_count == 0)
+		goto out;
+
+	/* At the end of the acquisition, the user application has
+	written the defined amount of data into the buffer; so the
+	last time, the DMA channel can easily overtake the tmp
+	frontier because no more data were sent from user space;
+	therefore no useless alarm should be sent */
+	if (buf->end_count != 0 && (long)(count - buf->end_count) > 0)
+		goto out;
+
+	/* Once these exceptions are handled, we check that the DMA
+	transfer has not overtaken the last record of the production
+	count (tmp_count was updated with prd_count the last time
+	__pre_abs_get was called). We must understand that we cannot
+	compare the current DMA count with the current production
+	count because even if, right now, the production count is
+	higher than the DMA count, it does not mean that the DMA count
+	was not greater a few cycles before; in such case, the DMA
+	channel would have retrieved the wrong data */
+	if ((long)(count - buf->tmp_count) > 0) {
+		set_bit(A4L_BUF_ERROR_NR, &buf->flags);
+		return -EPIPE;
+	}
+
+out:
+	buf->tmp_count = buf->prd_count;
+
+	return 0;
+}
+
+static inline int __pre_get(struct a4l_buffer * buf, unsigned long count)
+{
+	return __pre_abs_get(buf, buf->tmp_count + count);
+}
+
+static inline int __abs_put(struct a4l_buffer * buf, unsigned long count)
+{
+	unsigned long old = buf->prd_count;
+
+	if ((long)(buf->prd_count - count) >= 0)
+		return -EINVAL;
+
+	buf->prd_count = count;
+
+	if ((old / buf->size) != (count / buf->size))
+		set_bit(A4L_BUF_EOBUF_NR, &buf->flags);
+
+	if (buf->end_count != 0 && (long)(count - buf->end_count) >= 0)
+		set_bit(A4L_BUF_EOA_NR, &buf->flags);
+
+	return 0;
+}
+
+static inline int __put(struct a4l_buffer * buf, unsigned long count)
+{
+	return __abs_put(buf, buf->prd_count + count);
+}
+
+static inline int __abs_get(struct a4l_buffer * buf, unsigned long count)
+{
+	unsigned long old = buf->cns_count;
+
+	if ((long)(buf->cns_count - count) >= 0)
+		return -EINVAL;
+
+	buf->cns_count = count;
+
+	if ((old / buf->size) != count / buf->size)
+		set_bit(A4L_BUF_EOBUF_NR, &buf->flags);
+
+	if (buf->end_count != 0 && (long)(count - buf->end_count) >= 0)
+		set_bit(A4L_BUF_EOA_NR, &buf->flags);
+
+	return 0;
+}
+
+static inline int __get(struct a4l_buffer * buf, unsigned long count)
+{
+	return __abs_get(buf, buf->cns_count + count);
+}
+
+static inline unsigned long __count_to_put(struct a4l_buffer * buf)
+{
+	unsigned long ret;
+
+	if ((long) (buf->size + buf->cns_count - buf->prd_count) > 0)
+		ret = buf->size + buf->cns_count - buf->prd_count;
+	else
+		ret = 0;
+
+	return ret;
+}
+
+static inline unsigned long __count_to_get(struct a4l_buffer * buf)
+{
+	unsigned long ret;
+
+	/* If the acquisition is unlimited (end_count == 0), we must
+	   not take into account end_count */
+	if (buf->end_count == 0 || (long)(buf->end_count - buf->prd_count) > 0)
+		ret = buf->prd_count;
+	else
+		ret = buf->end_count;
+
+	if ((long)(ret - buf->cns_count) > 0)
+		ret -= buf->cns_count;
+	else
+		ret = 0;
+
+	return ret;
+}
+
+static inline unsigned long __count_to_end(struct a4l_buffer * buf)
+{
+	unsigned long ret = buf->end_count - buf->cns_count;
+
+	if (buf->end_count == 0)
+		return ULONG_MAX;
+
+	return ((long)ret) < 0 ? 0 : ret;
+}
+
+/* --- Buffer internal functions --- */
+
+int a4l_alloc_buffer(struct a4l_buffer *buf_desc, int buf_size);
+
+void a4l_free_buffer(struct a4l_buffer *buf_desc);
+
+void a4l_init_buffer(struct a4l_buffer * buf_desc);
+
+void a4l_cleanup_buffer(struct a4l_buffer * buf_desc);
+
+int a4l_setup_buffer(struct a4l_device_context *cxt, struct a4l_cmd_desc *cmd);
+
+void a4l_cancel_buffer(struct a4l_device_context *cxt);
+
+int a4l_buf_prepare_absput(struct a4l_subdevice *subd,
+			   unsigned long count);
+
+int a4l_buf_commit_absput(struct a4l_subdevice *subd,
+			  unsigned long count);
+
+int a4l_buf_prepare_put(struct a4l_subdevice *subd,
+			unsigned long count);
+
+int a4l_buf_commit_put(struct a4l_subdevice *subd,
+		       unsigned long count);
+
+int a4l_buf_put(struct a4l_subdevice *subd,
+		void *bufdata, unsigned long count);
+
+int a4l_buf_prepare_absget(struct a4l_subdevice *subd,
+			   unsigned long count);
+
+int a4l_buf_commit_absget(struct a4l_subdevice *subd,
+			  unsigned long count);
+
+int a4l_buf_prepare_get(struct a4l_subdevice *subd,
+			unsigned long count);
+
+int a4l_buf_commit_get(struct a4l_subdevice *subd,
+		       unsigned long count);
+
+int a4l_buf_get(struct a4l_subdevice *subd,
+		void *bufdata, unsigned long count);
+
+int a4l_buf_evt(struct a4l_subdevice *subd, unsigned long evts);
+
+unsigned long a4l_buf_count(struct a4l_subdevice *subd);
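+
+/*
+ * A minimal usage sketch, assuming a hypothetical driver interrupt
+ * handler fed by foo_hw_read(); freshly acquired samples are pushed
+ * into the ring buffer, then waiters are notified so pending events
+ * (end of buffer, end of acquisition, error) get propagated:
+ *
+ *   static int foo_irq_handler(unsigned int irq, void *cookie)
+ *   {
+ *           struct a4l_subdevice *subd = cookie;
+ *           u16 samples[16];
+ *
+ *           foo_hw_read(subd, samples, sizeof(samples));
+ *           a4l_buf_put(subd, samples, sizeof(samples));
+ *           a4l_buf_evt(subd, 0);
+ *
+ *           return 0;
+ *   }
+ */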
+
+/* --- Current Command management function --- */
+
+static inline struct a4l_cmd_desc *a4l_get_cmd(struct a4l_subdevice *subd)
+{
+	return (subd->buf) ? subd->buf->cur_cmd : NULL;
+}
+
+/* --- Munge related function --- */
+
+int a4l_get_chan(struct a4l_subdevice *subd);
+
+/* --- IOCTL / FOPS functions --- */
+
+int a4l_ioctl_mmap(struct a4l_device_context * cxt, void *arg);
+int a4l_ioctl_bufcfg(struct a4l_device_context * cxt, void *arg);
+int a4l_ioctl_bufcfg2(struct a4l_device_context * cxt, void *arg);
+int a4l_ioctl_bufinfo(struct a4l_device_context * cxt, void *arg);
+int a4l_ioctl_bufinfo2(struct a4l_device_context * cxt, void *arg);
+int a4l_ioctl_poll(struct a4l_device_context * cxt, void *arg);
+ssize_t a4l_read_buffer(struct a4l_device_context * cxt, void *bufdata, size_t nbytes);
+ssize_t a4l_write_buffer(struct a4l_device_context * cxt, const void *bufdata, size_t nbytes);
+int a4l_select(struct a4l_device_context *cxt,
+	       rtdm_selector_t *selector,
+	       enum rtdm_selecttype type, unsigned fd_index);
+
+#endif /* !_COBALT_RTDM_ANALOGY_BUFFER_H */
+++ linux-patched/include/xenomai/rtdm/analogy/command.h	2022-03-21 12:58:31.783865616 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/include/xenomai/rtdm/analogy/transfer.h	1970-01-01 01:00:00.000000000 +0100
+/**
+ * Copyright (C) 2008 Alexis Berlemont <alexis.berlemont@free.fr>
+ *
+ * Xenomai is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * Xenomai is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Xenomai; if not, write to the Free Software Foundation,
+ * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+#ifndef _COBALT_RTDM_ANALOGY_COMMAND_H
+#define _COBALT_RTDM_ANALOGY_COMMAND_H
+
+#include <rtdm/uapi/analogy.h>
+#include <rtdm/analogy/context.h>
+
+#define CR_CHAN(a) CHAN(a)
+#define CR_RNG(a) (((a)>>16)&0xff)
+#define CR_AREF(a) (((a)>>24)&0xf)
+
+/* --- Command related function --- */
+void a4l_free_cmddesc(struct a4l_cmd_desc * desc);
+
+/* --- Upper layer functions --- */
+int a4l_check_cmddesc(struct a4l_device_context * cxt, struct a4l_cmd_desc * desc);
+int a4l_ioctl_cmd(struct a4l_device_context * cxt, void *arg);
+
+#endif /* !_COBALT_RTDM_ANALOGY_COMMAND_H */
+++ linux-patched/include/xenomai/rtdm/analogy/transfer.h	2022-03-21 12:58:31.775865694 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/include/xenomai/rtdm/can.h	1970-01-01 01:00:00.000000000 +0100
+/*
+ * Analogy for Linux, transfer related features
+ *
+ * Copyright (C) 1997-2000 David A. Schleef <ds@schleef.org>
+ * Copyright (C) 2008 Alexis Berlemont <alexis.berlemont@free.fr>
+ *
+ * Xenomai is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * Xenomai is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Xenomai; if not, write to the Free Software Foundation,
+ * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+#ifndef _COBALT_RTDM_ANALOGY_TRANSFER_H
+#define _COBALT_RTDM_ANALOGY_TRANSFER_H
+
+#include <rtdm/analogy/buffer.h>
+
+/* IRQ types */
+#define A4L_IRQ_DISABLED 0
+
+/* Fields init values */
+#define A4L_IRQ_UNUSED (unsigned int)((unsigned short)(~0))
+#define A4L_IDX_UNUSED (unsigned int)(~0)
+
+/* TODO: IRQ handling must leave transfer for os_facilities */
+
+struct a4l_device;
+/* Analogy transfer descriptor */
+struct a4l_transfer {
+
+	/* Subdevices desc */
+	unsigned int nb_subd;
+	struct a4l_subdevice **subds;
+
+	/* Buffer stuff: the default size */
+	unsigned int default_bufsize;
+
+	/* IRQ in use */
+	/* TODO: irq_desc should vanish */
+	struct a4l_irq_descriptor irq_desc;
+};
+
+/* --- Proc function --- */
+
+int a4l_rdproc_transfer(struct seq_file *p, void *data);
+
+/* --- Upper layer functions --- */
+
+void a4l_presetup_transfer(struct a4l_device_context * cxt);
+int a4l_setup_transfer(struct a4l_device_context * cxt);
+int a4l_precleanup_transfer(struct a4l_device_context * cxt);
+int a4l_cleanup_transfer(struct a4l_device_context * cxt);
+int a4l_reserve_transfer(struct a4l_device_context * cxt, int idx_subd);
+int a4l_init_transfer(struct a4l_device_context * cxt, struct a4l_cmd_desc * cmd);
+int a4l_cancel_transfer(struct a4l_device_context * cxt, int idx_subd);
+int a4l_cancel_transfers(struct a4l_device_context * cxt);
+
+ssize_t a4l_put(struct a4l_device_context * cxt, void *buf, size_t nbytes);
+ssize_t a4l_get(struct a4l_device_context * cxt, void *buf, size_t nbytes);
+
+int a4l_request_irq(struct a4l_device *dev,
+		    unsigned int irq,
+		    a4l_irq_hdlr_t handler,
+		    unsigned long flags, void *cookie);
+int a4l_free_irq(struct a4l_device *dev, unsigned int irq);
+unsigned int a4l_get_irq(struct a4l_device *dev);
+
+int a4l_ioctl_cancel(struct a4l_device_context * cxt, void *arg);
+
+#endif /* !_COBALT_RTDM_ANALOGY_TRANSFER_H */
+++ linux-patched/include/xenomai/rtdm/can.h	2022-03-21 12:58:31.768865763 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/include/xenomai/rtdm/net.h	1970-01-01 01:00:00.000000000 +0100
+/*
+ * Copyright (C) 2006 Wolfgang Grandegger <wg@grandegger.com>
+ *
+ * Copyright (C) 2005, 2006 Sebastian Smolorz
+ *                    <Sebastian.Smolorz@stud.uni-hannover.de>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software Foundation,
+ * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+#ifndef _COBALT_RTDM_CAN_H
+#define _COBALT_RTDM_CAN_H
+
+#include <linux/net.h>
+#include <linux/socket.h>
+#include <linux/if.h>
+#include <rtdm/rtdm.h>
+#include <rtdm/uapi/can.h>
+
+#endif /* _COBALT_RTDM_CAN_H */
+++ linux-patched/include/xenomai/rtdm/net.h	2022-03-21 12:58:31.761865831 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/include/xenomai/rtdm/autotune.h	1970-01-01 01:00:00.000000000 +0100
+/*
+ *  RTnet - real-time networking subsystem
+ *  Copyright (C) 2005-2011 Jan Kiszka <jan.kiszka@web.de>
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; either version 2 of the License, or
+ *  (at your option) any later version.
+ *
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License
+ *  along with this program; if not, write to the Free Software
+ *  Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#ifndef _COBALT_RTDM_NET_H
+#define _COBALT_RTDM_NET_H
+
+#include <rtdm/rtdm.h>
+#include <rtdm/uapi/net.h>
+#include <rtdm/driver.h>
+
+struct rtnet_callback {
+    void    (*func)(struct rtdm_fd *, void *);
+    void    *arg;
+};
+
+#define RTNET_RTIOC_CALLBACK    _IOW(RTIOC_TYPE_NETWORK, 0x12, \
+				     struct rtnet_callback)
+
+/* utility functions */
+
+/* provided by rt_ipv4 */
+unsigned long rt_inet_aton(const char *ip);
+
+/* provided by rt_packet */
+int rt_eth_aton(unsigned char *addr_buf, const char *mac);
+
+#define RTNET_RTDM_VER 914
+
+#endif  /* _COBALT_RTDM_NET_H */
+++ linux-patched/include/xenomai/rtdm/autotune.h	2022-03-21 12:58:31.753865909 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/include/xenomai/cobalt/uapi/sched.h	1970-01-01 01:00:00.000000000 +0100
+/*
+ * Copyright (C) 2014 Philippe Gerum <rpm@xenomai.org>
+ *
+ * Xenomai is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+#ifndef _COBALT_RTDM_AUTOTUNE_H
+#define _COBALT_RTDM_AUTOTUNE_H
+
+#include <rtdm/rtdm.h>
+#include <rtdm/uapi/autotune.h>
+
+#endif /* !_COBALT_RTDM_AUTOTUNE_H */
+++ linux-patched/include/xenomai/cobalt/uapi/sched.h	2022-03-21 12:58:32.206861492 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/include/xenomai/cobalt/uapi/mutex.h	1970-01-01 01:00:00.000000000 +0100
+/*
+ * Copyright (C) 2005 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA.
+ */
+#ifndef _COBALT_UAPI_SCHED_H
+#define _COBALT_UAPI_SCHED_H
+
+#include <cobalt/uapi/kernel/types.h>
+
+#define SCHED_COBALT		42
+#define SCHED_WEAK		43
+
+#ifndef SCHED_SPORADIC
+#define SCHED_SPORADIC		10
+#define sched_ss_low_priority	sched_u.ss.__sched_low_priority
+#define sched_ss_repl_period	sched_u.ss.__sched_repl_period
+#define sched_ss_init_budget	sched_u.ss.__sched_init_budget
+#define sched_ss_max_repl	sched_u.ss.__sched_max_repl
+#endif	/* !SCHED_SPORADIC */
+
+struct __sched_ss_param {
+	int __sched_low_priority;
+	struct __user_old_timespec __sched_repl_period;
+	struct __user_old_timespec __sched_init_budget;
+	int __sched_max_repl;
+};
+
+#define sched_rr_quantum	sched_u.rr.__sched_rr_quantum
+
+struct __sched_rr_param {
+	struct __user_old_timespec __sched_rr_quantum;
+};
+
+#ifndef SCHED_TP
+#define SCHED_TP		11
+#define sched_tp_partition	sched_u.tp.__sched_partition
+#endif	/* !SCHED_TP */
+
+struct __sched_tp_param {
+	int __sched_partition;
+};
+
+struct sched_tp_window {
+	struct __user_old_timespec offset;
+	struct __user_old_timespec duration;
+	int ptid;
+};
+
+enum {
+	sched_tp_install,
+	sched_tp_uninstall,
+	sched_tp_start,
+	sched_tp_stop,
+};
+	
+struct __sched_config_tp {
+	int op;
+	int nr_windows;
+	struct sched_tp_window windows[0];
+};
+
+#define sched_tp_confsz(nr_win) \
+  (sizeof(struct __sched_config_tp) + nr_win * sizeof(struct sched_tp_window))
+
+#ifndef SCHED_QUOTA
+#define SCHED_QUOTA		12
+#define sched_quota_group	sched_u.quota.__sched_group
+#endif	/* !SCHED_QUOTA */
+
+struct __sched_quota_param {
+	int __sched_group;
+};
+
+enum {
+	sched_quota_add,
+	sched_quota_remove,
+	sched_quota_force_remove,
+	sched_quota_set,
+	sched_quota_get,
+};
+
+struct __sched_config_quota {
+	int op;
+	union {
+		struct {
+			int pshared;
+		} add;
+		struct {
+			int tgid;
+		} remove;
+		struct {
+			int tgid;
+			int quota;
+			int quota_peak;
+		} set;
+		struct {
+			int tgid;
+		} get;
+	};
+	struct __sched_quota_info {
+		int tgid;
+		int quota;
+		int quota_peak;
+		int quota_sum;
+	} info;
+};
+
+#define sched_quota_confsz()  sizeof(struct __sched_config_quota)
+
+struct sched_param_ex {
+	int sched_priority;
+	union {
+		struct __sched_ss_param ss;
+		struct __sched_rr_param rr;
+		struct __sched_tp_param tp;
+		struct __sched_quota_param quota;
+	} sched_u;
+};
+
+union sched_config {
+	struct __sched_config_tp tp;
+	struct __sched_config_quota quota;
+};
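+
+/*
+ * A minimal sketch of building a two-window time-partitioning
+ * configuration (how the descriptor is then submitted to the
+ * scheduler is left out); the allocation is sized with
+ * sched_tp_confsz():
+ *
+ *   size_t len = sched_tp_confsz(2);
+ *   union sched_config *cf = calloc(1, len);
+ *
+ *   cf->tp.op = sched_tp_install;
+ *   cf->tp.nr_windows = 2;
+ *   cf->tp.windows[0].duration.tv_nsec = 500000;
+ *   cf->tp.windows[0].ptid = 0;
+ *   cf->tp.windows[1].offset.tv_nsec = 500000;
+ *   cf->tp.windows[1].duration.tv_nsec = 500000;
+ *   cf->tp.windows[1].ptid = 1;
+ */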
+
+#endif /* !_COBALT_UAPI_SCHED_H */
+++ linux-patched/include/xenomai/cobalt/uapi/mutex.h	2022-03-21 12:58:32.199861560 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/include/xenomai/cobalt/uapi/kernel/synch.h	1970-01-01 01:00:00.000000000 +0100
+/*
+ * Written by Gilles Chanteperdrix <gilles.chanteperdrix@xenomai.org>.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA.
+ */
+#ifndef _COBALT_UAPI_MUTEX_H
+#define _COBALT_UAPI_MUTEX_H
+
+#include <cobalt/uapi/kernel/types.h>
+
+#define COBALT_MUTEX_MAGIC  0x86860303
+
+struct cobalt_mutex_state {
+	atomic_t owner;
+	__u32 flags;
+#define COBALT_MUTEX_COND_SIGNAL 0x00000001
+#define COBALT_MUTEX_ERRORCHECK  0x00000002
+	__u32 ceiling;
+};
+
+union cobalt_mutex_union {
+	pthread_mutex_t native_mutex;
+	struct cobalt_mutex_shadow {
+		__u32 magic;
+		__u32 lockcnt;
+		__u32 state_offset;
+		xnhandle_t handle;
+		struct cobalt_mutexattr attr;
+	} shadow_mutex;
+};
+
+#endif /* !_COBALT_UAPI_MUTEX_H */
+++ linux-patched/include/xenomai/cobalt/uapi/kernel/synch.h	2022-03-21 12:58:32.192861628 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/include/xenomai/cobalt/uapi/kernel/limits.h	1970-01-01 01:00:00.000000000 +0100
+/*
+ * Copyright (C) 2001-2013 Philippe Gerum <rpm@xenomai.org>.
+ * Copyright (C) 2008, 2009 Jan Kiszka <jan.kiszka@siemens.com>.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA.
+ */
+#ifndef _COBALT_UAPI_KERNEL_SYNCH_H
+#define _COBALT_UAPI_KERNEL_SYNCH_H
+
+#include <cobalt/uapi/kernel/types.h>
+
+/* Creation flags */
+#define XNSYNCH_FIFO    0x0
+#define XNSYNCH_PRIO    0x1
+#define XNSYNCH_PI      0x2
+#define XNSYNCH_DREORD  0x4
+#define XNSYNCH_OWNER   0x8
+#define XNSYNCH_PP      0x10
+
+/* Fast lock API */
+static inline int xnsynch_fast_is_claimed(xnhandle_t handle)
+{
+	return (handle & XNSYNCH_FLCLAIM) != 0;
+}
+
+static inline xnhandle_t xnsynch_fast_claimed(xnhandle_t handle)
+{
+	return handle | XNSYNCH_FLCLAIM;
+}
+
+static inline xnhandle_t xnsynch_fast_ceiling(xnhandle_t handle)
+{
+	return handle | XNSYNCH_FLCEIL;
+}
+
+static inline int
+xnsynch_fast_owner_check(atomic_t *fastlock, xnhandle_t ownerh)
+{
+	return (xnhandle_get_id(atomic_read(fastlock)) == ownerh) ?
+		0 : -EPERM;
+}
+
+static inline
+int xnsynch_fast_acquire(atomic_t *fastlock, xnhandle_t new_ownerh)
+{
+	xnhandle_t h;
+
+	h = atomic_cmpxchg(fastlock, XN_NO_HANDLE, new_ownerh);
+	if (h != XN_NO_HANDLE) {
+		if (xnhandle_get_id(h) == new_ownerh)
+			return -EBUSY;
+
+		return -EAGAIN;
+	}
+
+	return 0;
+}
+
+static inline
+int xnsynch_fast_release(atomic_t *fastlock, xnhandle_t cur_ownerh)
+{
+	return (xnhandle_t)atomic_cmpxchg(fastlock, cur_ownerh, XN_NO_HANDLE)
+		== cur_ownerh;
+}
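+
+/*
+ * A minimal sketch, assuming fastlock points at the shared lock word
+ * and me is the caller's own handle; the uncontended paths reduce to
+ * a single compare-and-swap, anything else falls back to a
+ * hypothetical slow path (typically a syscall):
+ *
+ *   if (xnsynch_fast_acquire(fastlock, me))
+ *           slow_acquire(fastlock, me);
+ *
+ *   if (!xnsynch_fast_release(fastlock, me))
+ *           slow_release(fastlock, me);
+ */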
+
+/* Local/shared property */
+static inline int xnsynch_is_shared(xnhandle_t handle)
+{
+	return (handle & XNSYNCH_PSHARED) != 0;
+}
+
+#endif /* !_COBALT_UAPI_KERNEL_SYNCH_H */
+++ linux-patched/include/xenomai/cobalt/uapi/kernel/limits.h	2022-03-21 12:58:32.184861706 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/include/xenomai/cobalt/uapi/kernel/types.h	1970-01-01 01:00:00.000000000 +0100
+/*
+ * Copyright (C) 2014 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA.
+ */
+#ifndef _COBALT_UAPI_KERNEL_LIMITS_H
+#define _COBALT_UAPI_KERNEL_LIMITS_H
+
+#define XNOBJECT_NAME_LEN 32
+
+#endif /* !_COBALT_UAPI_KERNEL_LIMITS_H */
+++ linux-patched/include/xenomai/cobalt/uapi/kernel/types.h	2022-03-21 12:58:32.177861774 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/include/xenomai/cobalt/uapi/kernel/urw.h	1970-01-01 01:00:00.000000000 +0100
+/*
+ * Copyright (C) 2013 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA.
+ */
+#ifndef _COBALT_UAPI_KERNEL_TYPES_H
+#define _COBALT_UAPI_KERNEL_TYPES_H
+
+#include <linux/types.h>
+#include <cobalt/uapi/kernel/limits.h>
+
+typedef __u64 xnticks_t;
+
+typedef __s64 xnsticks_t;
+
+typedef __u32 xnhandle_t;
+
+#define XN_NO_HANDLE		((xnhandle_t)0)
+#define XN_HANDLE_INDEX_MASK	((xnhandle_t)0xf0000000)
+
+/* Fixed bits (part of the identifier) */
+#define XNSYNCH_PSHARED		((xnhandle_t)0x40000000)
+
+/* Transient bits (expressing a status) */
+#define XNSYNCH_FLCLAIM		((xnhandle_t)0x80000000) /* Contended. */
+#define XNSYNCH_FLCEIL		((xnhandle_t)0x20000000) /* Ceiling active. */
+
+#define XN_HANDLE_TRANSIENT_MASK	(XNSYNCH_FLCLAIM|XNSYNCH_FLCEIL)
+
+/*
+ * Strip all special bits from the handle, only retaining the object
+ * index value in the registry.
+ */
+static inline xnhandle_t xnhandle_get_index(xnhandle_t handle)
+{
+	return handle & ~XN_HANDLE_INDEX_MASK;
+}
+
+/*
+ * Strip the transient bits from the handle, only retaining the fixed
+ * part making the identifier.
+ */
+static inline xnhandle_t xnhandle_get_id(xnhandle_t handle)
+{
+	return handle & ~XN_HANDLE_TRANSIENT_MASK;
+}
+
+/*
+ * Our representation of time specs at the kernel<->user interface
+ * boundary at the moment, until we have fully transitioned to a
+ * y2038-safe implementation in libcobalt. Once done, those legacy
+ * types will be removed.
+ */
+struct __user_old_timespec {
+	long  tv_sec;
+	long  tv_nsec;
+};
+
+struct __user_old_itimerspec {
+	struct __user_old_timespec it_interval;
+	struct __user_old_timespec it_value;
+};
+
+struct __user_old_timeval {
+	long  tv_sec;
+	long  tv_usec;
+};
+
+/* Lifted from include/uapi/linux/timex.h. */
+struct __user_old_timex {
+	unsigned int modes;	/* mode selector */
+	__kernel_long_t offset;	/* time offset (usec) */
+	__kernel_long_t freq;	/* frequency offset (scaled ppm) */
+	__kernel_long_t maxerror;/* maximum error (usec) */
+	__kernel_long_t esterror;/* estimated error (usec) */
+	int status;		/* clock command/status */
+	__kernel_long_t constant;/* pll time constant */
+	__kernel_long_t precision;/* clock precision (usec) (read only) */
+	__kernel_long_t tolerance;/* clock frequency tolerance (ppm)
+				   * (read only)
+				   */
+	struct __user_old_timeval time;	/* (read only, except for ADJ_SETOFFSET) */
+	__kernel_long_t tick;	/* (modified) usecs between clock ticks */
+
+	__kernel_long_t ppsfreq;/* pps frequency (scaled ppm) (ro) */
+	__kernel_long_t jitter; /* pps jitter (us) (ro) */
+	int shift;              /* interval duration (s) (shift) (ro) */
+	__kernel_long_t stabil;            /* pps stability (scaled ppm) (ro) */
+	__kernel_long_t jitcnt; /* jitter limit exceeded (ro) */
+	__kernel_long_t calcnt; /* calibration intervals (ro) */
+	__kernel_long_t errcnt; /* calibration errors (ro) */
+	__kernel_long_t stbcnt; /* stability limit exceeded (ro) */
+
+	int tai;		/* TAI offset (ro) */
+
+	int  :32; int  :32; int  :32; int  :32;
+	int  :32; int  :32; int  :32; int  :32;
+	int  :32; int  :32; int  :32;
+};
+
+#endif /* !_COBALT_UAPI_KERNEL_TYPES_H */
+++ linux-patched/include/xenomai/cobalt/uapi/kernel/urw.h	2022-03-21 12:58:32.170861843 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/include/xenomai/cobalt/uapi/kernel/vdso.h	1970-01-01 01:00:00.000000000 +0100
+/*
+ * Copyright (C) 2013 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA.
+ */
+#ifndef _COBALT_UAPI_KERNEL_URW_H
+#define _COBALT_UAPI_KERNEL_URW_H
+
+#include <linux/types.h>
+
+/*
+ * A restricted version of the kernel seqlocks with a slightly
+ * different interface, allowing for unsynced reads with concurrent
+ * write detection, without serializing writers.  Caller should
+ * provide for proper locking to deal with concurrent updates.
+ *
+ * urw_t lock = URW_INITIALIZER;
+ * urwstate_t tmp;
+ *
+ * unsynced_read_block(&tmp, &lock) {
+ *          (will redo until clean read)...
+ * }
+ *
+ * unsynced_write_block(&tmp, &lock) {
+ *          ...
+ * }
+ *
+ * This code was inspired by Wolfgang Mauerer's linux/seqlock.h
+ * adaptation for Xenomai 2.6 to support the VDSO feature.
+ */
+
+typedef struct {
+	__u32 sequence;
+} urw_t;
+
+typedef struct {
+	__u32 token;
+	__u32 dirty;
+} urwstate_t;
+
+#define URW_INITIALIZER     { 0 }
+#define DEFINE_URW(__name)  urw_t __name = URW_INITIALIZER
+
+#ifndef READ_ONCE
+#define READ_ONCE ACCESS_ONCE
+#endif
+
+static inline void __try_read_start(const urw_t *urw, urwstate_t *tmp)
+{
+	__u32 token;
+repeat:
+	token = READ_ONCE(urw->sequence);
+	smp_rmb();
+	if (token & 1) {
+		cpu_relax();
+		goto repeat;
+	}
+
+	tmp->token = token;
+	tmp->dirty = 1;
+}
+
+static inline void __try_read_end(const urw_t *urw, urwstate_t *tmp)
+{
+	smp_rmb();
+	if (urw->sequence != tmp->token) {
+		__try_read_start(urw, tmp);
+		return;
+	}
+
+	tmp->dirty = 0;
+}
+
+static inline void __do_write_start(urw_t *urw, urwstate_t *tmp)
+{
+	urw->sequence++;
+	tmp->dirty = 1;
+	smp_wmb();
+}
+
+static inline void __do_write_end(urw_t *urw, urwstate_t *tmp)
+{
+	smp_wmb();
+	tmp->dirty = 0;
+	urw->sequence++;
+}
+
+static inline void unsynced_rw_init(urw_t *urw)
+{
+	urw->sequence = 0;
+}
+
+#define unsynced_read_block(__tmp, __urw)		\
+	for (__try_read_start(__urw, __tmp);		\
+	     (__tmp)->dirty; __try_read_end(__urw, __tmp))
+
+#define unsynced_write_block(__tmp, __urw)		\
+	for (__do_write_start(__urw, __tmp);		\
+	     (__tmp)->dirty; __do_write_end(__urw, __tmp))
+
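+/*
+ * A minimal read-side sketch, assuming a hypothetical shared
+ * structure guarded by a urw_t and t pointing at one instance; the
+ * block re-executes until a consistent snapshot has been observed:
+ *
+ *   struct shared_time {
+ *           urw_t lock;
+ *           __u64 sec;
+ *           __u32 nsec;
+ *   };
+ *
+ *   urwstate_t tmp;
+ *
+ *   unsynced_read_block(&tmp, &t->lock) {
+ *           sec = t->sec;
+ *           nsec = t->nsec;
+ *   }
+ */
+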
+#endif /* !_COBALT_UAPI_KERNEL_URW_H */
+++ linux-patched/include/xenomai/cobalt/uapi/kernel/vdso.h	2022-03-21 12:58:32.162861921 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/include/xenomai/cobalt/uapi/kernel/pipe.h	1970-01-01 01:00:00.000000000 +0100
+/*
+ * Copyright (C) 2009 Wolfgang Mauerer <wolfgang.mauerer@siemens.com>.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA.
+ */
+#ifndef _COBALT_UAPI_KERNEL_VDSO_H
+#define _COBALT_UAPI_KERNEL_VDSO_H
+
+#include <cobalt/uapi/kernel/urw.h>
+
+/*
+ * I-pipe only. Dovetail enables the common vDSO for getting
+ * CLOCK_REALTIME timestamps from the out-of-band stage
+ * (XNVDSO_FEAT_HOST_REALTIME is cleared in this case).
+ */
+struct xnvdso_hostrt_data {
+	__u64 wall_sec;
+	__u64 wtom_sec;
+	__u64 cycle_last;
+	__u64 mask;
+	__u32 wall_nsec;
+	__u32 wtom_nsec;
+	__u32 mult;
+	__u32 shift;
+	__u32 live;
+	urw_t lock;
+};
+
+/*
+ * Data shared between the Cobalt kernel and applications, which lives
+ * in the shared memory heap (COBALT_MEMDEV_SHARED).
+ * xnvdso.features tells which data is present. Notice
+ * that struct xnvdso may only grow, but never shrink.
+ */
+struct xnvdso {
+	__u64 features;
+	/* XNVDSO_FEAT_HOST_REALTIME */
+	struct xnvdso_hostrt_data hostrt_data;
+	/* XNVDSO_FEAT_WALLCLOCK_OFFSET */
+	__u64 wallclock_offset;
+};
+
+/* For each shared feature, add a flag below. */
+
+#define XNVDSO_FEAT_HOST_REALTIME	0x0000000000000001ULL
+#define XNVDSO_FEAT_WALLCLOCK_OFFSET	0x0000000000000002ULL
+
+static inline int xnvdso_test_feature(struct xnvdso *vdso,
+				      __u64 feature)
+{
+	return (vdso->features & feature) != 0;
+}
+
+#endif /* !_COBALT_UAPI_KERNEL_VDSO_H */
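A hedged sketch of how a reader could combine xnvdso_test_feature() with the urw helpers above to take a consistent snapshot of the host real-time data; the function name, error convention and the assumption that hostrt_data is guarded by its own lock member are illustrative only:

	static int get_hostrt_snapshot(struct xnvdso *vdso,
				       struct xnvdso_hostrt_data *snap)
	{
		urwstate_t tmp;

		if (!xnvdso_test_feature(vdso, XNVDSO_FEAT_HOST_REALTIME))
			return -1;	/* feature not advertised (e.g. Dovetail) */

		/* Retry until a snapshot untouched by concurrent updates is read. */
		unsynced_read_block(&tmp, &vdso->hostrt_data.lock) {
			*snap = vdso->hostrt_data;
		}

		return snap->live ? 0 : -1;
	}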
+++ linux-patched/include/xenomai/cobalt/uapi/kernel/pipe.h	2022-03-21 12:58:32.155861989 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/include/xenomai/cobalt/uapi/kernel/thread.h	1970-01-01 01:00:00.000000000 +0100
+/*
+ * Copyright (C) 2014 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA.
+ */
+#ifndef _COBALT_UAPI_KERNEL_PIPE_H
+#define _COBALT_UAPI_KERNEL_PIPE_H
+
+#define	XNPIPE_IOCTL_BASE	'p'
+
+#define XNPIPEIOC_GET_NRDEV	_IOW(XNPIPE_IOCTL_BASE, 0, int)
+#define XNPIPEIOC_IFLUSH	_IO(XNPIPE_IOCTL_BASE, 1)
+#define XNPIPEIOC_OFLUSH	_IO(XNPIPE_IOCTL_BASE, 2)
+#define XNPIPEIOC_FLUSH		XNPIPEIOC_OFLUSH
+#define XNPIPEIOC_SETSIG	_IO(XNPIPE_IOCTL_BASE, 3)
+
+#define XNPIPE_NORMAL	0x0
+#define XNPIPE_URGENT	0x1
+
+#define XNPIPE_IFLUSH	0x1
+#define XNPIPE_OFLUSH	0x2
+
+#define XNPIPE_MINOR_AUTO  (-1)
+
+#endif /* !_COBALT_UAPI_KERNEL_PIPE_H */
+++ linux-patched/include/xenomai/cobalt/uapi/kernel/thread.h	2022-03-21 12:58:32.147862067 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/include/xenomai/cobalt/uapi/kernel/heap.h	1970-01-01 01:00:00.000000000 +0100
+/*
+ * Copyright (C) 2013 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA.
+ */
+#ifndef _COBALT_UAPI_KERNEL_THREAD_H
+#define _COBALT_UAPI_KERNEL_THREAD_H
+
+#include <cobalt/uapi/kernel/types.h>
+
+/**
+ * @ingroup cobalt_core_thread
+ * @defgroup cobalt_core_thread_states Thread state flags
+ * @brief Bits reporting permanent or transient states of threads
+ * @{
+ */
+
+/* State flags (shared) */
+
+#define XNSUSP    0x00000001 /**< Suspended. */
+#define XNPEND    0x00000002 /**< Sleep-wait for a resource. */
+#define XNDELAY   0x00000004 /**< Delayed */
+#define XNREADY   0x00000008 /**< Linked to the ready queue. */
+#define XNDORMANT 0x00000010 /**< Not started yet */
+#define XNZOMBIE  0x00000020 /**< Zombie thread in deletion process */
+#define XNMAPPED  0x00000040 /**< Thread is mapped to a linux task */
+#define XNRELAX   0x00000080 /**< Relaxed shadow thread (blocking bit) */
+#define XNHELD    0x00000200 /**< Thread is held to process emergency. */
+#define XNBOOST   0x00000400 /**< PI/PP boost undergoing */
+#define XNSSTEP   0x00000800 /**< Single-stepped by debugger */
+#define XNLOCK    0x00001000 /**< Scheduler lock control (pseudo-bit, not in ->state) */
+#define XNRRB     0x00002000 /**< Undergoes a round-robin scheduling */
+#define XNWARN    0x00004000 /**< Issue SIGDEBUG on error detection */
+#define XNFPU     0x00008000 /**< Thread uses FPU */
+#define XNROOT    0x00010000 /**< Root thread (that is, Linux/IDLE) */
+#define XNWEAK    0x00020000 /**< Non real-time shadow (from the WEAK class) */
+#define XNUSER    0x00040000 /**< Shadow thread running in userland */
+#define XNJOINED  0x00080000 /**< Another thread waits for joining this thread */
+#define XNTRAPLB  0x00100000 /**< Trap lock break (i.e. may not sleep with sched lock) */
+#define XNDEBUG   0x00200000 /**< User-level debugging enabled */
+#define XNDBGSTOP 0x00400000 /**< Stopped for synchronous debugging */
+
+/** @} */
+
+/**
+ * @ingroup cobalt_core_thread
+ * @defgroup cobalt_core_thread_info Thread information flags
+ * @brief Bits reporting events notified to threads
+ * @{
+ */
+
+/* Information flags (shared) */
+
+#define XNTIMEO   0x00000001 /**< Woken up due to a timeout condition */
+#define XNRMID    0x00000002 /**< Pending on a removed resource */
+#define XNBREAK   0x00000004 /**< Forcibly awakened from a wait state */
+#define XNKICKED  0x00000008 /**< Forced out of primary mode */
+#define XNWAKEN   0x00000010 /**< Thread woken up upon resource availability */
+#define XNROBBED  0x00000020 /**< Robbed from resource ownership */
+#define XNCANCELD 0x00000040 /**< Cancellation request is pending */
+#define XNPIALERT 0x00000080 /**< Priority inversion alert (SIGDEBUG sent) */
+#define XNSCHEDP  0x00000100 /**< schedparam propagation is pending */
+#define XNCONTHI  0x00000200 /**< Continue in primary mode after debugging */
+
+/* Local information flags (private to current thread) */
+
+#define XNMOVED   0x00000001 /**< CPU migration in primary mode occurred */
+#define XNLBALERT 0x00000002 /**< Scheduler lock break alert (SIGDEBUG sent) */
+#define XNDESCENT 0x00000004 /**< Adaptive transition to secondary mode in progress */
+#define XNSYSRST  0x00000008 /**< Thread awaiting syscall restart after signal */
+#define XNHICCUP  0x00000010 /**< Just left ptracing */
+
+/** @} */
+
+/*
+ * Must strictly follow the declaration order of the state flags
+ * defined above. Status symbols are defined as follows:
+ *
+ * 'S' -> Forcibly suspended.
+ * 'w'/'W' -> Waiting for a resource, with or without timeout.
+ * 'D' -> Delayed (without any other wait condition).
+ * 'R' -> Runnable.
+ * 'U' -> Unstarted or dormant.
+ * 'X' -> Relaxed shadow.
+ * 'H' -> Held in emergency.
+ * 'b' -> Priority boost undergoing.
+ * 'T' -> Ptraced and stopped.
+ * 'l' -> Locks scheduler.
+ * 'r' -> Undergoes round-robin.
+ * 't' -> Runtime mode errors notified.
+ * 'L' -> Lock breaks trapped.
+ * 's' -> Ptraced, stopped synchronously.
+ */
+#define XNTHREAD_STATE_LABELS  "SWDRU..X.HbTlrt.....L.s"
+
+struct xnthread_user_window {
+	__u32 state;
+	__u32 info;
+	__u32 grant_value;
+	__u32 pp_pending;
+};
+
+#endif /* !_COBALT_UAPI_KERNEL_THREAD_H */
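To illustrate the mapping documented above, a small helper (illustrative only, assuming the C library's ffs()) can translate a single state bit into its label character:

	#include <strings.h>	/* ffs() */

	/* E.g. state_bit_label(XNRELAX) yields 'X', per the table above. */
	static char state_bit_label(__u32 state_bit)
	{
		/* state_bit must be a single, non-zero state flag. */
		return XNTHREAD_STATE_LABELS[ffs(state_bit) - 1];
	}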
+++ linux-patched/include/xenomai/cobalt/uapi/kernel/heap.h	2022-03-21 12:58:32.140862135 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/include/xenomai/cobalt/uapi/kernel/trace.h	1970-01-01 01:00:00.000000000 +0100
+/*
+ * Copyright (C) 2013 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA.
+ */
+#ifndef _COBALT_UAPI_KERNEL_HEAP_H
+#define _COBALT_UAPI_KERNEL_HEAP_H
+
+#include <linux/types.h>
+
+#define COBALT_MEMDEV_PRIVATE  "memdev-private"
+#define COBALT_MEMDEV_SHARED   "memdev-shared"
+#define COBALT_MEMDEV_SYS      "memdev-sys"
+
+struct cobalt_memdev_stat {
+	__u32 size;
+	__u32 free;
+};
+
+#define MEMDEV_RTIOC_STAT	_IOR(RTDM_CLASS_MEMORY, 0, struct cobalt_memdev_stat)
+
+#endif /* !_COBALT_UAPI_KERNEL_HEAP_H */
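A hedged user-space sketch of querying these statistics; it assumes the memory device is exposed under the usual /dev/rtdm/ prefix and that the RTDM ioctl definitions are available, both of which depend on the target setup:

	#include <fcntl.h>
	#include <stdio.h>
	#include <unistd.h>
	#include <sys/ioctl.h>

	static int print_private_heap_stat(void)
	{
		struct cobalt_memdev_stat statbuf;
		int fd, ret;

		fd = open("/dev/rtdm/" COBALT_MEMDEV_PRIVATE, O_RDONLY);
		if (fd < 0)
			return -1;

		ret = ioctl(fd, MEMDEV_RTIOC_STAT, &statbuf);
		if (ret == 0)
			printf("size=%u free=%u\n", statbuf.size, statbuf.free);

		close(fd);
		return ret;
	}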
+++ linux-patched/include/xenomai/cobalt/uapi/kernel/trace.h	2022-03-21 12:58:32.133862204 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/include/xenomai/cobalt/uapi/signal.h	1970-01-01 01:00:00.000000000 +0100
+/*
+ * Copyright (C) 2006 Jan Kiszka <jan.kiszka@web.de>.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA.
+ */
+#ifndef _COBALT_UAPI_KERNEL_TRACE_H
+#define _COBALT_UAPI_KERNEL_TRACE_H
+
+#define __xntrace_op_max_begin		0
+#define __xntrace_op_max_end		1
+#define __xntrace_op_max_reset		2
+#define __xntrace_op_user_start		3
+#define __xntrace_op_user_stop		4
+#define __xntrace_op_user_freeze	5
+#define __xntrace_op_special		6
+#define __xntrace_op_special_u64	7
+#define __xntrace_op_latpeak_freeze	8
+
+#endif /* !_COBALT_UAPI_KERNEL_TRACE_H */
+++ linux-patched/include/xenomai/cobalt/uapi/signal.h	2022-03-21 12:58:32.125862281 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/include/xenomai/cobalt/uapi/sem.h	1970-01-01 01:00:00.000000000 +0100
+/*
+ * Copyright (C) 2006 Gilles Chanteperdrix <gilles.chanteperdrix@xenomai.org>.
+ * Copyright (C) 2013 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA.
+ */
+#ifndef _COBALT_UAPI_SIGNAL_H
+#define _COBALT_UAPI_SIGNAL_H
+
+/*
+ * Those are pseudo-signals only available with pthread_kill() to
+ * suspend/resume/unblock threads synchronously, force them out of
+ * primary mode or even demote them to the SCHED_OTHER class via the
+ * low-level nucleus interface. Can't block those signals, queue them,
+ * or even set them in a sigset. Those are nasty, strictly anti-POSIX
+ * things; we do provide them nevertheless only because we are mean
+ * people doing harmful code for no valid reason. Can't go against
+ * your nature, right?  Nah... (this said, don't blame us for POSIX,
+ * we are not _that_ mean).
+ */
+#define SIGSUSP (SIGRTMAX + 1)
+#define SIGRESM (SIGRTMAX + 2)
+#define SIGRELS (SIGRTMAX + 3)
+#define SIGKICK (SIGRTMAX + 4)
+#define SIGDEMT (SIGRTMAX + 5)
+
+/*
+ * Regular POSIX signals with specific handling by Xenomai.
+ */
+#define SIGSHADOW			SIGWINCH
+#define sigshadow_action(code)		((code) & 0xff)
+#define sigshadow_arg(code)		(((code) >> 8) & 0xff)
+#define sigshadow_int(action, arg)	((action) | ((arg) << 8))
+
+/* SIGSHADOW action codes. */
+#define SIGSHADOW_ACTION_HARDEN		1
+#define SIGSHADOW_ACTION_BACKTRACE	2
+#define SIGSHADOW_ACTION_HOME		3
+#define SIGSHADOW_BACKTRACE_DEPTH	16
+
+#define SIGDEBUG			SIGXCPU
+#define sigdebug_code(si)		((si)->si_value.sival_int)
+#define sigdebug_reason(si)		(sigdebug_code(si) & 0xff)
+#define sigdebug_marker			0xfccf0000
+#define sigdebug_marked(si)		\
+	((sigdebug_code(si) & 0xffff0000) == sigdebug_marker)
+
+/* Possible values of sigdebug_reason() */
+#define SIGDEBUG_UNDEFINED		0
+#define SIGDEBUG_MIGRATE_SIGNAL		1
+#define SIGDEBUG_MIGRATE_SYSCALL	2
+#define SIGDEBUG_MIGRATE_FAULT		3
+#define SIGDEBUG_MIGRATE_PRIOINV	4
+#define SIGDEBUG_NOMLOCK		5
+#define SIGDEBUG_WATCHDOG		6
+#define SIGDEBUG_RESCNT_IMBALANCE	7
+#define SIGDEBUG_LOCK_BREAK		8
+#define SIGDEBUG_MUTEX_SLEEP		9
+
+#define COBALT_DELAYMAX			2147483647U
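For reference, a sketch of how an application typically decodes these notifications in a SIGDEBUG handler installed with sigaction() and SA_SIGINFO; the handler body and messages are illustrative only, not mandated by this header:

	#include <signal.h>
	#include <string.h>
	#include <unistd.h>

	static void sigdebug_handler(int sig, siginfo_t *si, void *context)
	{
		const char *msg = "SIGDEBUG: unexpected switch to secondary mode\n";

		/* Only trust the reason code if the Cobalt marker is present. */
		if (sigdebug_marked(si) &&
		    sigdebug_reason(si) == SIGDEBUG_MIGRATE_SYSCALL)
			msg = "SIGDEBUG: relaxed by a regular Linux syscall\n";

		write(STDERR_FILENO, msg, strlen(msg));
	}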
+
+/*
+ * Internal accessors to extra siginfo/sigevent fields, extending some
+ * existing base field. The extra data should be grouped in a
+ * dedicated struct type. The extra space is taken from the padding
+ * area available from the original structure definitions.
+ *
+ * e.g. getting the address of the following extension to
+ * _sifields._rt from siginfo_t,
+ *
+ * struct bar {
+ *    int foo;
+ * };
+ *
+ * would be noted as:
+ *
+ * siginfo_t si;
+ * struct bar *p = __cobalt_si_extra(&si, _rt, struct bar);
+ *
+ * This code is shared between kernel and user space. Proper
+ * definitions of siginfo_t and sigevent_t should have been read prior
+ * to including this file.
+ *
+ * CAUTION: this macro does not handle alignment issues for the extra
+ * data. The extra type definition should take care of this.
+ */
+#ifdef __OPTIMIZE__
+extern void *__siginfo_overflow(void);
+static inline
+const void *__check_si_overflow(size_t fldsz, size_t extrasz, const void *p)
+{
+	siginfo_t *si __attribute__((unused));
+
+	if (fldsz + extrasz <= sizeof(si->_sifields))
+		return p;
+
+	return __siginfo_overflow();
+}
+#define __cobalt_si_extra(__si, __basefield, __type)				\
+	((__type *)__check_si_overflow(sizeof(__si->_sifields.__basefield),	\
+	       sizeof(__type), &(__si->_sifields.__basefield) + 1))
+#else
+#define __cobalt_si_extra(__si, __basefield, __type)				\
+	((__type *)((&__si->_sifields.__basefield) + 1))
+#endif
+
+/* Same approach, this time for extending sigevent_t. */
+
+#ifdef __OPTIMIZE__
+extern void *__sigevent_overflow(void);
+static inline
+const void *__check_sev_overflow(size_t fldsz, size_t extrasz, const void *p)
+{
+	sigevent_t *sev __attribute__((unused));
+
+	if (fldsz + extrasz <= sizeof(sev->_sigev_un))
+		return p;
+
+	return __sigevent_overflow();
+}
+#define __cobalt_sev_extra(__sev, __basefield, __type)				\
+	((__type *)__check_sev_overflow(sizeof(__sev->_sigev_un.__basefield),	\
+	       sizeof(__type), &(__sev->_sigev_un.__basefield) + 1))
+#else
+#define __cobalt_sev_extra(__sev, __basefield, __type)				\
+	((__type *)((&__sev->_sigev_un.__basefield) + 1))
+#endif
+
+#endif /* !_COBALT_UAPI_SIGNAL_H */
+++ linux-patched/include/xenomai/cobalt/uapi/sem.h	2022-03-21 12:58:32.118862350 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/include/xenomai/cobalt/uapi/corectl.h	1970-01-01 01:00:00.000000000 +0100
+/*
+ * Written by Gilles Chanteperdrix <gilles.chanteperdrix@xenomai.org>.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA.
+ */
+#ifndef _COBALT_UAPI_SEM_H
+#define _COBALT_UAPI_SEM_H
+
+#include <cobalt/uapi/kernel/types.h>
+
+#define COBALT_SEM_MAGIC (0x86860707)
+#define COBALT_NAMED_SEM_MAGIC (0x86860D0D)
+
+struct cobalt_sem;
+
+struct cobalt_sem_state {
+	atomic_t value;
+	__u32 flags;
+};
+
+union cobalt_sem_union {
+	sem_t native_sem;
+	struct cobalt_sem_shadow {
+		__u32 magic;
+		__s32 state_offset;
+		xnhandle_t handle;
+	} shadow_sem;
+};
+
+struct cobalt_sem_info {
+	unsigned int value;
+	int flags;
+	int nrwait;
+};
+
+#define SEM_FIFO       0x1
+#define SEM_PULSE      0x2
+#define SEM_PSHARED    0x4
+#define SEM_REPORT     0x8
+#define SEM_WARNDEL    0x10
+#define SEM_RAWCLOCK   0x20
+#define SEM_NOBUSYDEL  0x40
+
+#endif /* !_COBALT_UAPI_SEM_H */
+++ linux-patched/include/xenomai/cobalt/uapi/corectl.h	2022-03-21 12:58:32.110862428 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/include/xenomai/cobalt/uapi/syscall.h	1970-01-01 01:00:00.000000000 +0100
+/*
+ * Copyright (C) 2015 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA.
+ */
+#ifndef _COBALT_UAPI_CORECTL_H
+#define _COBALT_UAPI_CORECTL_H
+
+#define _CC_COBALT_GET_VERSION		0
+#define _CC_COBALT_GET_NR_PIPES		1
+#define _CC_COBALT_GET_NR_TIMERS	2
+
+#define _CC_COBALT_GET_DEBUG			3
+#   define _CC_COBALT_DEBUG_ASSERT		1
+#   define _CC_COBALT_DEBUG_CONTEXT		2
+#   define _CC_COBALT_DEBUG_LOCKING		4
+#   define _CC_COBALT_DEBUG_USER		8
+#   define _CC_COBALT_DEBUG_MUTEX_RELAXED	16
+#   define _CC_COBALT_DEBUG_MUTEX_SLEEP		32
+/* bit 6 (64) formerly used for DEBUG_POSIX_SYNCHRO */
+#   define _CC_COBALT_DEBUG_LEGACY		128
+#   define _CC_COBALT_DEBUG_TRACE_RELAX		256
+#   define _CC_COBALT_DEBUG_NET			512
+
+#define _CC_COBALT_GET_POLICIES		4
+#   define _CC_COBALT_SCHED_FIFO	1
+#   define _CC_COBALT_SCHED_RR		2
+#   define _CC_COBALT_SCHED_WEAK	4
+#   define _CC_COBALT_SCHED_SPORADIC	8
+#   define _CC_COBALT_SCHED_QUOTA	16
+#   define _CC_COBALT_SCHED_TP		32
+
+#define _CC_COBALT_GET_WATCHDOG		5
+#define _CC_COBALT_GET_CORE_STATUS	6
+#define _CC_COBALT_START_CORE		7
+#define _CC_COBALT_STOP_CORE		8
+
+#define _CC_COBALT_GET_NET_CONFIG	9
+#   define _CC_COBALT_NET		0x00000001
+#   define _CC_COBALT_NET_ETH_P_ALL	0x00000002
+#   define _CC_COBALT_NET_IPV4		0x00000004
+#   define _CC_COBALT_NET_ICMP		0x00000008
+#   define _CC_COBALT_NET_NETROUTING	0x00000010
+#   define _CC_COBALT_NET_ROUTER	0x00000020
+#   define _CC_COBALT_NET_UDP		0x00000040
+#   define _CC_COBALT_NET_AF_PACKET	0x00000080
+#   define _CC_COBALT_NET_TDMA		0x00000100
+#   define _CC_COBALT_NET_NOMAC		0x00000200
+#   define _CC_COBALT_NET_CFG		0x00000400
+#   define _CC_COBALT_NET_CAP		0x00000800
+#   define _CC_COBALT_NET_PROXY		0x00001000
+
+enum cobalt_run_states {
+	COBALT_STATE_DISABLED,
+	COBALT_STATE_RUNNING,
+	COBALT_STATE_STOPPED,
+	COBALT_STATE_TEARDOWN,
+	COBALT_STATE_WARMUP,
+};
+
+#endif /* !_COBALT_UAPI_CORECTL_H */
+++ linux-patched/include/xenomai/cobalt/uapi/syscall.h	2022-03-21 12:58:32.103862496 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/include/xenomai/cobalt/uapi/time.h	1970-01-01 01:00:00.000000000 +0100
+/*
+ * Copyright (C) 2005 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA.
+ */
+#ifndef _COBALT_UAPI_SYSCALL_H
+#define _COBALT_UAPI_SYSCALL_H
+
+#include <cobalt/uapi/asm-generic/syscall.h>
+
+#define sc_cobalt_bind				0
+#define sc_cobalt_thread_create			1
+#define sc_cobalt_thread_getpid			2
+#define sc_cobalt_thread_setmode		3
+#define sc_cobalt_thread_setname		4
+#define sc_cobalt_thread_join			5
+#define sc_cobalt_thread_kill			6
+#define sc_cobalt_thread_setschedparam_ex	7
+#define sc_cobalt_thread_getschedparam_ex	8
+#define sc_cobalt_thread_getstat		9
+#define sc_cobalt_sem_init			10
+#define sc_cobalt_sem_destroy			11
+#define sc_cobalt_sem_post			12
+#define sc_cobalt_sem_wait			13
+#define sc_cobalt_sem_trywait			14
+#define sc_cobalt_sem_getvalue			15
+#define sc_cobalt_sem_open			16
+#define sc_cobalt_sem_close			17
+#define sc_cobalt_sem_unlink			18
+#define sc_cobalt_sem_timedwait			19
+#define sc_cobalt_sem_inquire			20
+#define sc_cobalt_sem_broadcast_np		21
+#define sc_cobalt_clock_getres			22
+#define sc_cobalt_clock_gettime			23
+#define sc_cobalt_clock_settime			24
+#define sc_cobalt_clock_nanosleep		25
+#define sc_cobalt_mutex_init			26
+#define sc_cobalt_mutex_check_init		27
+#define sc_cobalt_mutex_destroy			28
+#define sc_cobalt_mutex_lock			29
+#define sc_cobalt_mutex_timedlock		30
+#define sc_cobalt_mutex_trylock			31
+#define sc_cobalt_mutex_unlock			32
+#define sc_cobalt_cond_init			33
+#define sc_cobalt_cond_destroy			34
+#define sc_cobalt_cond_wait_prologue		35
+#define sc_cobalt_cond_wait_epilogue		36
+#define sc_cobalt_mq_open			37
+#define sc_cobalt_mq_close			38
+#define sc_cobalt_mq_unlink			39
+#define sc_cobalt_mq_getattr			40
+#define sc_cobalt_mq_timedsend			41
+#define sc_cobalt_mq_timedreceive		42
+#define sc_cobalt_mq_notify			43
+#define sc_cobalt_sched_minprio			44
+#define sc_cobalt_sched_maxprio			45
+#define sc_cobalt_sched_weightprio		46
+#define sc_cobalt_sched_yield			47
+#define sc_cobalt_sched_setscheduler_ex		48
+#define sc_cobalt_sched_getscheduler_ex		49
+#define sc_cobalt_sched_setconfig_np		50
+#define sc_cobalt_sched_getconfig_np		51
+#define sc_cobalt_timer_create			52
+#define sc_cobalt_timer_delete			53
+#define sc_cobalt_timer_settime			54
+#define sc_cobalt_timer_gettime			55
+#define sc_cobalt_timer_getoverrun		56
+#define sc_cobalt_timerfd_create		57
+#define sc_cobalt_timerfd_settime		58
+#define sc_cobalt_timerfd_gettime		59
+#define sc_cobalt_sigwait			60
+#define sc_cobalt_sigwaitinfo			61
+#define sc_cobalt_sigtimedwait			62
+#define sc_cobalt_sigpending			63
+#define sc_cobalt_kill				64
+#define sc_cobalt_sigqueue			65
+#define sc_cobalt_monitor_init			66
+#define sc_cobalt_monitor_destroy		67
+#define sc_cobalt_monitor_enter			68
+#define sc_cobalt_monitor_wait			69
+#define sc_cobalt_monitor_sync			70
+#define sc_cobalt_monitor_exit			71
+#define sc_cobalt_event_init			72
+#define sc_cobalt_event_wait			73
+#define sc_cobalt_event_sync			74
+#define sc_cobalt_event_destroy			75
+#define sc_cobalt_event_inquire			76
+#define sc_cobalt_open				77
+#define sc_cobalt_socket			78
+#define sc_cobalt_close				79
+#define sc_cobalt_ioctl				80
+#define sc_cobalt_read				81
+#define sc_cobalt_write				82
+#define sc_cobalt_recvmsg			83
+#define sc_cobalt_sendmsg			84
+#define sc_cobalt_mmap				85
+#define sc_cobalt_select			86
+#define sc_cobalt_fcntl				87
+#define sc_cobalt_migrate			88
+#define sc_cobalt_archcall			89
+#define sc_cobalt_trace				90
+#define sc_cobalt_corectl			91
+#define sc_cobalt_get_current			92
+/* 93: formerly mayday */
+#define sc_cobalt_backtrace			94
+#define sc_cobalt_serialdbg			95
+#define sc_cobalt_extend			96
+#define sc_cobalt_ftrace_puts			97
+#define sc_cobalt_recvmmsg			98
+#define sc_cobalt_sendmmsg			99
+#define sc_cobalt_clock_adjtime			100
+#define sc_cobalt_thread_setschedprio		101
+#define sc_cobalt_sem_timedwait64		102
+#define sc_cobalt_clock_gettime64		103
+#define sc_cobalt_clock_settime64		104
+#define sc_cobalt_clock_nanosleep64		105
+#define sc_cobalt_clock_getres64		106
+#define sc_cobalt_clock_adjtime64		107
+#define sc_cobalt_mutex_timedlock64		108
+#define sc_cobalt_mq_timedsend64		109
+#define sc_cobalt_mq_timedreceive64		110
+#define sc_cobalt_sigtimedwait64		111
+#define sc_cobalt_monitor_wait64		112
+#define sc_cobalt_event_wait64			113
+#define sc_cobalt_recvmmsg64			114
+
+#define __NR_COBALT_SYSCALLS			128 /* Power of 2 */
+
+#endif /* !_COBALT_UAPI_SYSCALL_H */
+++ linux-patched/include/xenomai/cobalt/uapi/time.h	2022-03-21 12:58:32.096862564 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/include/xenomai/cobalt/uapi/event.h	1970-01-01 01:00:00.000000000 +0100
+/*
+ * Copyright (C) 2013 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA.
+ */
+#ifndef _COBALT_UAPI_TIME_H
+#define _COBALT_UAPI_TIME_H
+
+#ifndef CLOCK_MONOTONIC_RAW
+#define CLOCK_MONOTONIC_RAW  4
+#endif
+
+/*
+ * The additional clock ids we manage must not collide with any of
+ * the POSIX and Linux kernel definitions, so that no ambiguity
+ * arises when porting applications in both directions.
+ *
+ * 0  .. 31   regular POSIX/linux clock ids.
+ * 32 .. 63   statically reserved Cobalt clocks
+ * 64 .. 127  dynamically registered Cobalt clocks (external)
+ *
+ * CAUTION: clock ids must fit within a 7-bit value, see
+ * include/cobalt/uapi/thread.h (e.g. cobalt_condattr).
+ */
+#define __COBALT_CLOCK_STATIC(nr)	((clockid_t)(nr + 32))
+
+#define CLOCK_HOST_REALTIME  __COBALT_CLOCK_STATIC(0)
+
+#define COBALT_MAX_EXTCLOCKS  64
+
+#define __COBALT_CLOCK_EXT(nr)		((clockid_t)(nr) | (1 << 6))
+#define __COBALT_CLOCK_EXT_P(id)	((int)(id) >= 64 && (int)(id) < 128)
+#define __COBALT_CLOCK_EXT_INDEX(id)	((int)(id) & ~(1 << 6))
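A small illustration of how the external clock id helpers compose; the function is hypothetical:

	/* E.g. __COBALT_CLOCK_EXT(5) is clock id 69; the slot recovered is 5. */
	static int ext_clock_slot(clockid_t id)
	{
		if (!__COBALT_CLOCK_EXT_P(id))
			return -1;	/* not a dynamically registered clock */

		return __COBALT_CLOCK_EXT_INDEX(id);	/* 0 .. COBALT_MAX_EXTCLOCKS - 1 */
	}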
+
+/*
+ * Additional timerfd defines.
+ *
+ * When passing TFD_WAKEUP to timer_settime, any timer expiration
+ * unblocks the thread having issued timer_settime.
+ */
+#define TFD_WAKEUP	(1 << 2)
+
+#endif /* !_COBALT_UAPI_TIME_H */
+++ linux-patched/include/xenomai/cobalt/uapi/event.h	2022-03-21 12:58:32.088862642 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/include/xenomai/cobalt/uapi/monitor.h	1970-01-01 01:00:00.000000000 +0100
+/*
+ * Copyright (C) 2013 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA.
+ */
+#ifndef _COBALT_UAPI_EVENT_H
+#define _COBALT_UAPI_EVENT_H
+
+#include <cobalt/uapi/kernel/types.h>
+
+struct cobalt_event_state {
+	__u32 value;
+	__u32 flags;
+#define COBALT_EVENT_PENDED  0x1
+	__u32 nwaiters;
+};
+
+struct cobalt_event;
+
+/* Creation flags. */
+#define COBALT_EVENT_FIFO    0x0
+#define COBALT_EVENT_PRIO    0x1
+#define COBALT_EVENT_SHARED  0x2
+
+/* Wait mode. */
+#define COBALT_EVENT_ALL  0x0
+#define COBALT_EVENT_ANY  0x1
+
+struct cobalt_event_shadow {
+	__u32 state_offset;
+	__u32 flags;
+	xnhandle_t handle;
+};
+
+struct cobalt_event_info {
+	unsigned int value;
+	int flags;
+	int nrwait;
+};
+
+typedef struct cobalt_event_shadow cobalt_event_t;
+
+#endif /* !_COBALT_UAPI_EVENT_H */
+++ linux-patched/include/xenomai/cobalt/uapi/monitor.h	2022-03-21 12:58:32.081862711 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/include/xenomai/cobalt/uapi/asm-generic/arith.h	1970-01-01 01:00:00.000000000 +0100
+/*
+ * Copyright (C) 2013 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA.
+ */
+#ifndef _COBALT_UAPI_MONITOR_H
+#define _COBALT_UAPI_MONITOR_H
+
+#include <cobalt/uapi/kernel/types.h>
+
+struct cobalt_monitor_state {
+	atomic_t owner;
+	__u32 flags;
+#define COBALT_MONITOR_GRANTED    0x01
+#define COBALT_MONITOR_DRAINED    0x02
+#define COBALT_MONITOR_SIGNALED   0x03 /* i.e. GRANTED or DRAINED */
+#define COBALT_MONITOR_BROADCAST  0x04
+#define COBALT_MONITOR_PENDED     0x08
+};
+
+struct cobalt_monitor;
+
+struct cobalt_monitor_shadow {
+	__u32 state_offset;
+	__u32 flags;
+	xnhandle_t handle;
+#define COBALT_MONITOR_SHARED     0x1
+#define COBALT_MONITOR_WAITGRANT  0x0
+#define COBALT_MONITOR_WAITDRAIN  0x1
+};
+
+typedef struct cobalt_monitor_shadow cobalt_monitor_t;
+
+#endif /* !_COBALT_UAPI_MONITOR_H */
+++ linux-patched/include/xenomai/cobalt/uapi/asm-generic/arith.h	2022-03-21 12:58:32.074862779 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/include/xenomai/cobalt/uapi/asm-generic/syscall.h	1970-01-01 01:00:00.000000000 +0100
+/**
+ *   Generic arithmetic/conversion routines.
+ *   Copyright &copy; 2005 Stelian Pop.
+ *   Copyright &copy; 2005 Gilles Chanteperdrix.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA.
+ */
+#ifndef _COBALT_UAPI_ASM_GENERIC_ARITH_H
+#define _COBALT_UAPI_ASM_GENERIC_ARITH_H
+
+#ifndef xnarch_u64tou32
+#define xnarch_u64tou32(ull, h, l) ({		\
+      union {					\
+	      unsigned long long _ull;		\
+	      struct endianstruct _s;		\
+      } _u;					\
+      _u._ull = (ull);				\
+      (h) = _u._s._h;				\
+      (l) = _u._s._l;				\
+})
+#endif /* !xnarch_u64tou32 */
+
+#ifndef xnarch_u64fromu32
+#define xnarch_u64fromu32(h, l) ({		\
+	union {					\
+		unsigned long long _ull;	\
+		struct endianstruct _s;		\
+	} _u;					\
+	_u._s._h = (h);				\
+	_u._s._l = (l);				\
+	_u._ull;				\
+})
+#endif /* !xnarch_u64fromu32 */
+
+#ifndef xnarch_ullmul
+static inline __attribute__((__const__)) unsigned long long
+xnarch_generic_ullmul(const unsigned m0, const unsigned m1)
+{
+	return (unsigned long long) m0 * m1;
+}
+#define xnarch_ullmul(m0,m1) xnarch_generic_ullmul((m0),(m1))
+#endif /* !xnarch_ullmul */
+
+#ifndef xnarch_ulldiv
+static inline unsigned long long xnarch_generic_ulldiv (unsigned long long ull,
+							const unsigned uld,
+							unsigned long *const rp)
+{
+	const unsigned r = do_div(ull, uld);
+
+	if (rp)
+		*rp = r;
+
+	return ull;
+}
+#define xnarch_ulldiv(ull,uld,rp) xnarch_generic_ulldiv((ull),(uld),(rp))
+#endif /* !xnarch_ulldiv */
+
+#ifndef xnarch_uldivrem
+#define xnarch_uldivrem(ull,ul,rp) ((unsigned) xnarch_ulldiv((ull),(ul),(rp)))
+#endif /* !xnarch_uldivrem */
+
+#ifndef xnarch_divmod64
+static inline unsigned long long
+xnarch_generic_divmod64(unsigned long long a,
+			unsigned long long b,
+			unsigned long long *rem)
+{
+	unsigned long long q;
+#if defined(__KERNEL__) && BITS_PER_LONG < 64
+	unsigned long long
+		xnarch_generic_full_divmod64(unsigned long long a,
+					     unsigned long long b,
+					     unsigned long long *rem);
+	if (b <= 0xffffffffULL) {
+		unsigned long r;
+		q = xnarch_ulldiv(a, b, &r);
+		if (rem)
+			*rem = r;
+	} else {
+		if (a < b) {
+			if (rem)
+				*rem = a;
+			return 0;
+		}
+
+		return xnarch_generic_full_divmod64(a, b, rem);
+	}
+#else /* !(__KERNEL__ && BITS_PER_LONG < 64) */
+	q = a / b;
+	if (rem)
+		*rem = a % b;
+#endif  /* !(__KERNEL__ && BITS_PER_LONG < 64) */
+	return q;
+}
+#define xnarch_divmod64(a,b,rp) xnarch_generic_divmod64((a),(b),(rp))
+#endif /* !xnarch_divmod64 */
+
+#ifndef xnarch_imuldiv
+static inline __attribute__((__const__)) int xnarch_generic_imuldiv(int i,
+								    int mult,
+								    int div)
+{
+	/* (int)i = (unsigned long long)i*(unsigned)(mult)/(unsigned)div. */
+	const unsigned long long ull = xnarch_ullmul(i, mult);
+	return xnarch_uldivrem(ull, div, NULL);
+}
+#define xnarch_imuldiv(i,m,d) xnarch_generic_imuldiv((i),(m),(d))
+#endif /* !xnarch_imuldiv */
+
+#ifndef xnarch_imuldiv_ceil
+static inline __attribute__((__const__)) int xnarch_generic_imuldiv_ceil(int i,
+									 int mult,
+									 int div)
+{
+	/* Same as xnarch_generic_imuldiv, rounding up. */
+	const unsigned long long ull = xnarch_ullmul(i, mult);
+	return xnarch_uldivrem(ull + (unsigned)div - 1, div, NULL);
+}
+#define xnarch_imuldiv_ceil(i,m,d) xnarch_generic_imuldiv_ceil((i),(m),(d))
+#endif /* !xnarch_imuldiv_ceil */
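As a concrete illustration (the 25 MHz figure is arbitrary), xnarch_imuldiv() is typically used to rescale a 32-bit tick count without overflowing a 32-bit intermediate product:

	static inline int ticks_to_ns_25mhz(int ticks)
	{
		/* (unsigned long long)ticks * 1000000000 / 25000000 == ticks * 40 */
		return xnarch_imuldiv(ticks, 1000000000, 25000000);
	}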
+
+/* Division of an unsigned 96-bit value ((h << 32) + l) by an unsigned 32-bit
+   value. Building block for llimd. Without the const qualifiers, gcc reloads
+   the registers after each call to uldivrem. */
+static inline unsigned long long
+xnarch_generic_div96by32(const unsigned long long h,
+			 const unsigned l,
+			 const unsigned d,
+			 unsigned long *const rp)
+{
+	unsigned long rh;
+	const unsigned qh = xnarch_uldivrem(h, d, &rh);
+	const unsigned long long t = xnarch_u64fromu32(rh, l);
+	const unsigned ql = xnarch_uldivrem(t, d, rp);
+
+	return xnarch_u64fromu32(qh, ql);
+}
+
+#ifndef xnarch_llimd
+static inline __attribute__((__const__))
+unsigned long long xnarch_generic_ullimd(const unsigned long long op,
+					 const unsigned m,
+					 const unsigned d)
+{
+	unsigned int oph, opl, tlh, tll;
+	unsigned long long th, tl;
+
+	xnarch_u64tou32(op, oph, opl);
+	tl = xnarch_ullmul(opl, m);
+	xnarch_u64tou32(tl, tlh, tll);
+	th = xnarch_ullmul(oph, m);
+	th += tlh;
+
+	return xnarch_generic_div96by32(th, tll, d, NULL);
+}
+
+static inline __attribute__((__const__)) long long
+xnarch_generic_llimd (long long op, unsigned m, unsigned d)
+{
+	long long ret;
+	int sign = 0;
+
+	if (op < 0LL) {
+		sign = 1;
+		op = -op;
+	}
+	ret = xnarch_generic_ullimd(op, m, d);
+
+	return sign ? -ret : ret;
+}
+#define xnarch_llimd(ll,m,d) xnarch_generic_llimd((ll),(m),(d))
+#endif /* !xnarch_llimd */
+
+#ifndef xnarch_u96shift
+#define xnarch_u96shift(h, m, l, s) ({		\
+	unsigned int _l = (l);			\
+	unsigned int _m = (m);			\
+	unsigned int _s = (s);			\
+	_l >>= _s;				\
+	_l |= (_m << (32 - _s));		\
+	_m >>= _s;				\
+	_m |= ((h) << (32 - _s));		\
+	xnarch_u64fromu32(_m, _l);		\
+})
+#endif /* !xnarch_u96shift */
+
+static inline long long xnarch_llmi(int i, int j)
+{
+	/* Fast 32x32->64 signed multiplication */
+	return (long long) i * j;
+}
+
+#ifndef xnarch_llmulshft
+/* Fast scaled-math-based replacement for long long multiply-divide */
+static inline long long
+xnarch_generic_llmulshft(const long long op,
+			  const unsigned m,
+			  const unsigned s)
+{
+	unsigned int oph, opl, tlh, tll, thh, thl;
+	unsigned long long th, tl;
+
+	xnarch_u64tou32(op, oph, opl);
+	tl = xnarch_ullmul(opl, m);
+	xnarch_u64tou32(tl, tlh, tll);
+	th = xnarch_llmi(oph, m);
+	th += tlh;
+	xnarch_u64tou32(th, thh, thl);
+
+	return xnarch_u96shift(thh, thl, tll, s);
+}
+#define xnarch_llmulshft(ll, m, s) xnarch_generic_llmulshft((ll), (m), (s))
+#endif /* !xnarch_llmulshft */
+
+#ifdef XNARCH_HAVE_NODIV_LLIMD
+
+/* Representation of a 32 bits fraction. */
+struct xnarch_u32frac {
+	unsigned long long frac;
+	unsigned integ;
+};
+
+static inline void xnarch_init_u32frac(struct xnarch_u32frac *const f,
+				       const unsigned m,
+				       const unsigned d)
+{
+	/*
+	 * Prevent clever compiler optimizations from kicking in when
+	 * d is known at compile time. The performance of this
+	 * function is not critical since it is only called at init
+	 * time.
+	 */
+	volatile unsigned vol_d = d;
+	f->integ = m / d;
+	f->frac = xnarch_generic_div96by32
+		(xnarch_u64fromu32(m % d, 0), 0, vol_d, NULL);
+}
+
+#ifndef xnarch_nodiv_imuldiv
+static inline __attribute__((__const__)) unsigned
+xnarch_generic_nodiv_imuldiv(unsigned op, const struct xnarch_u32frac f)
+{
+	return (xnarch_ullmul(op, f.frac >> 32) >> 32) + f.integ * op;
+}
+#define xnarch_nodiv_imuldiv(op, f) xnarch_generic_nodiv_imuldiv((op),(f))
+#endif /* xnarch_nodiv_imuldiv */
+
+#ifndef xnarch_nodiv_imuldiv_ceil
+static inline __attribute__((__const__)) unsigned
+xnarch_generic_nodiv_imuldiv_ceil(unsigned op, const struct xnarch_u32frac f)
+{
+	unsigned long long full = xnarch_ullmul(op, f.frac >> 32) + ~0U;
+	return (full >> 32) + f.integ * op;
+}
+#define xnarch_nodiv_imuldiv_ceil(op, f) \
+	xnarch_generic_nodiv_imuldiv_ceil((op),(f))
+#endif /* xnarch_nodiv_imuldiv_ceil */
+
+#ifndef xnarch_nodiv_ullimd
+
+#ifndef xnarch_add96and64
+#error "xnarch_add96and64 must be implemented."
+#endif
+
+static inline __attribute__((__const__)) unsigned long long
+xnarch_mul64by64_high(const unsigned long long op, const unsigned long long m)
+{
+	/* Compute high 64 bits of multiplication 64 bits x 64 bits. */
+	register unsigned long long t0, t1, t2, t3;
+	register unsigned int oph, opl, mh, ml, t0h, t0l, t1h, t1l, t2h, t2l, t3h, t3l;
+
+	xnarch_u64tou32(op, oph, opl);
+	xnarch_u64tou32(m, mh, ml);
+	t0 = xnarch_ullmul(opl, ml);
+	xnarch_u64tou32(t0, t0h, t0l);
+	t3 = xnarch_ullmul(oph, mh);
+	xnarch_u64tou32(t3, t3h, t3l);
+	xnarch_add96and64(t3h, t3l, t0h, 0, t0l >> 31);
+	t1 = xnarch_ullmul(oph, ml);
+	xnarch_u64tou32(t1, t1h, t1l);
+	xnarch_add96and64(t3h, t3l, t0h, t1h, t1l);
+	t2 = xnarch_ullmul(opl, mh);
+	xnarch_u64tou32(t2, t2h, t2l);
+	xnarch_add96and64(t3h, t3l, t0h, t2h, t2l);
+
+	return xnarch_u64fromu32(t3h, t3l);
+}
+
+static inline unsigned long long
+xnarch_generic_nodiv_ullimd(const unsigned long long op,
+			    const unsigned long long frac,
+			    unsigned int integ)
+{
+	return xnarch_mul64by64_high(op, frac) + integ * op;
+}
+#define xnarch_nodiv_ullimd(op, f, i)  xnarch_generic_nodiv_ullimd((op),(f), (i))
+#endif /* !xnarch_nodiv_ullimd */
+
+#ifndef xnarch_nodiv_llimd
+static inline __attribute__((__const__)) long long
+xnarch_generic_nodiv_llimd(long long op, unsigned long long frac,
+			   unsigned int integ)
+{
+	long long ret;
+	int sign = 0;
+
+	if (op < 0LL) {
+		sign = 1;
+		op = -op;
+	}
+	ret = xnarch_nodiv_ullimd(op, frac, integ);
+
+	return sign ? -ret : ret;
+}
+#define xnarch_nodiv_llimd(ll,frac,integ) xnarch_generic_nodiv_llimd((ll),(frac),(integ))
+#endif /* !xnarch_nodiv_llimd */
+
+#endif /* XNARCH_HAVE_NODIV_LLIMD */
+
+static inline void xnarch_init_llmulshft(const unsigned m_in,
+					 const unsigned d_in,
+					 unsigned *m_out,
+					 unsigned *s_out)
+{
+	/*
+	 * Prevent clever compiler optimizations from kicking in when
+	 * d_in is known at compile time. The performance of this
+	 * function is not critical since it is only called at init
+	 * time.
+	 */
+	volatile unsigned int vol_d = d_in;
+	unsigned long long mult;
+
+	*s_out = 31;
+	while (1) {
+		mult = ((unsigned long long)m_in) << *s_out;
+		do_div(mult, vol_d);
+		if (mult <= 0x7FFFFFFF)
+			break;
+		(*s_out)--;
+	}
+	*m_out = (unsigned int)mult;
+}
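A typical usage pattern, sketched here with hypothetical names: compute the multiply/shift pair once at init time, then use xnarch_llmulshft() in the hot path as a cheap stand-in for value * m_in / d_in:

	static unsigned int ns2tsc_mul, ns2tsc_shift;

	static void init_ns2tsc(unsigned int clock_freq_hz)
	{
		/* ns2tsc_mul / 2^ns2tsc_shift approximates clock_freq_hz / 1e9. */
		xnarch_init_llmulshft(clock_freq_hz, 1000000000,
				      &ns2tsc_mul, &ns2tsc_shift);
	}

	static long long ns2tsc(long long ns)
	{
		return xnarch_llmulshft(ns, ns2tsc_mul, ns2tsc_shift);
	}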
+
+#define xnarch_ullmod(ull,uld,rem)   ({ xnarch_ulldiv(ull,uld,rem); (*rem); })
+#define xnarch_uldiv(ull, d)         xnarch_uldivrem(ull, d, NULL)
+#define xnarch_ulmod(ull, d)         ({ unsigned long _rem;	\
+					xnarch_uldivrem(ull,d,&_rem); _rem; })
+
+#define xnarch_div64(a,b)            xnarch_divmod64((a),(b),NULL)
+#define xnarch_mod64(a,b)            ({ unsigned long long _rem; \
+					xnarch_divmod64((a),(b),&_rem); _rem; })
+
+#endif /* _COBALT_UAPI_ASM_GENERIC_ARITH_H */
+++ linux-patched/include/xenomai/cobalt/uapi/asm-generic/syscall.h	2022-03-21 12:58:32.066862857 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/include/xenomai/cobalt/uapi/asm-generic/features.h	1970-01-01 01:00:00.000000000 +0100
+/*
+ * Copyright (C) 2013 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA.
+ */
+#ifndef _COBALT_UAPI_ASM_GENERIC_SYSCALL_H
+#define _COBALT_UAPI_ASM_GENERIC_SYSCALL_H
+
+#include <linux/types.h>
+#include <asm/xenomai/uapi/features.h>
+#include <asm/xenomai/uapi/syscall.h>
+
+#define __COBALT_SYSCALL_BIT	0x10000000
+
+struct cobalt_bindreq {
+	/** Features userland requires. */
+	__u32 feat_req;
+	/** ABI revision userland uses. */
+	__u32 abi_rev;
+	/** Features the Cobalt core provides. */
+	struct cobalt_featinfo feat_ret;
+};
+
+#define COBALT_SECONDARY  0
+#define COBALT_PRIMARY    1
+
+#endif /* !_COBALT_UAPI_ASM_GENERIC_SYSCALL_H */
+++ linux-patched/include/xenomai/cobalt/uapi/asm-generic/features.h	2022-03-21 12:58:32.059862925 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/include/xenomai/cobalt/uapi/cond.h	1970-01-01 01:00:00.000000000 +0100
+/*
+ * Copyright (C) 2005 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA.
+ */
+#ifndef _COBALT_UAPI_ASM_GENERIC_FEATURES_H
+#define _COBALT_UAPI_ASM_GENERIC_FEATURES_H
+
+#include <linux/types.h>
+
+#define XNFEAT_STRING_LEN 64
+
+struct cobalt_featinfo {
+	/** Real-time clock frequency */
+	__u64 clock_freq;
+	/** Offset of nkvdso in the sem heap. */
+	__u32 vdso_offset;
+	/** ABI revision level. */
+	__u32 feat_abirev;
+	/** Available feature set. */
+	__u32 feat_all;
+	/** Mandatory features (when requested). */
+	__u32 feat_man;
+	/** Requested feature set. */
+	__u32 feat_req;
+	/** Missing features. */
+	__u32 feat_mis;
+	char feat_all_s[XNFEAT_STRING_LEN];
+	char feat_man_s[XNFEAT_STRING_LEN];
+	char feat_req_s[XNFEAT_STRING_LEN];
+	char feat_mis_s[XNFEAT_STRING_LEN];
+	/* Architecture-specific features. */
+	struct cobalt_featinfo_archdep feat_arch;
+};
+
+#define __xn_feat_smp         0x80000000
+#define __xn_feat_nosmp       0x40000000
+#define __xn_feat_fastsynch   0x20000000
+#define __xn_feat_nofastsynch 0x10000000
+#define __xn_feat_control     0x08000000
+#define __xn_feat_prioceiling 0x04000000
+
+#ifdef CONFIG_SMP
+#define __xn_feat_smp_mask __xn_feat_smp
+#else
+#define __xn_feat_smp_mask __xn_feat_nosmp
+#endif
+
+/*
+ * Revisit: all archs currently support fast locking, and there is no
+ * reason for any future port not to provide this. This will be
+ * written in stone at the next ABI update, when fastsynch support is
+ * dropped from the optional feature set.
+ */
+#define __xn_feat_fastsynch_mask __xn_feat_fastsynch
+
+/* List of generic features kernel or userland may support */
+#define __xn_feat_generic_mask			\
+	(__xn_feat_smp_mask		|	\
+	 __xn_feat_fastsynch_mask 	|	\
+	 __xn_feat_prioceiling)
+
+/*
+ * List of features both sides have to agree on: If userland supports
+ * it, the kernel has to provide it, too. This means backward
+ * compatibility between older userland and newer kernel may be
+ * supported for those features, but forward compatibility between
+ * newer userland and older kernel cannot.
+ */
+#define __xn_feat_generic_man_mask		\
+	(__xn_feat_fastsynch		|	\
+	 __xn_feat_nofastsynch		|	\
+	 __xn_feat_nosmp		|	\
+	 __xn_feat_prioceiling)
+
+static inline
+const char *get_generic_feature_label(unsigned int feature)
+{
+	switch (feature) {
+	case __xn_feat_smp:
+		return "smp";
+	case __xn_feat_nosmp:
+		return "nosmp";
+	case __xn_feat_fastsynch:
+		return "fastsynch";
+	case __xn_feat_nofastsynch:
+		return "nofastsynch";
+	case __xn_feat_control:
+		return "control";
+	case __xn_feat_prioceiling:
+		return "prioceiling";
+	default:
+		return NULL;
+	}
+}
+
+static inline int check_abi_revision(unsigned long abirev)
+{
+	return abirev == XENOMAI_ABI_REV;
+}
+
+#endif /* !_COBALT_UAPI_ASM_GENERIC_FEATURES_H */
+++ linux-patched/include/xenomai/cobalt/uapi/cond.h	2022-03-21 12:58:32.052862993 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/include/xenomai/cobalt/uapi/thread.h	1970-01-01 01:00:00.000000000 +0100
+/*
+ * Written by Gilles Chanteperdrix <gilles.chanteperdrix@xenomai.org>.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA.
+ */
+#ifndef _COBALT_UAPI_COND_H
+#define _COBALT_UAPI_COND_H
+
+#include <cobalt/uapi/mutex.h>
+
+#define COBALT_COND_MAGIC 0x86860505
+
+struct cobalt_cond_state {
+	__u32 pending_signals;
+	__u32 mutex_state_offset;
+};
+
+union cobalt_cond_union {
+	pthread_cond_t native_cond;
+	struct cobalt_cond_shadow {
+		__u32 magic;
+		__u32 state_offset;
+		xnhandle_t handle;
+	} shadow_cond;
+};
+
+#endif /* !_COBALT_UAPI_COND_H */
+++ linux-patched/include/xenomai/cobalt/uapi/thread.h	2022-03-21 12:58:32.044863071 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/include/xenomai/cobalt/kernel/schedparam.h	1970-01-01 01:00:00.000000000 +0100
+/*
+ * Copyright (C) 2005 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA.
+ */
+#ifndef _COBALT_UAPI_THREAD_H
+#define _COBALT_UAPI_THREAD_H
+
+#include <cobalt/uapi/kernel/thread.h>
+
+#define PTHREAD_WARNSW             XNWARN
+#define PTHREAD_LOCK_SCHED         XNLOCK
+#define PTHREAD_DISABLE_LOCKBREAK  XNTRAPLB
+#define PTHREAD_CONFORMING     0
+
+struct cobalt_mutexattr {
+	int type : 3;
+	int protocol : 3;
+	int pshared : 1;
+	int __pad : 1;
+	int ceiling : 8;  /* prio-1, (XN)SCHED_FIFO range. */
+};
+
+struct cobalt_condattr {
+	int clock : 7;
+	int pshared : 1;
+};
+
+struct cobalt_threadstat {
+	__u64 xtime;
+	__u64 timeout;
+	__u64 msw;
+	__u64 csw;
+	__u64 xsc;
+	__u32 status;
+	__u32 pf;
+	int cpu;
+	int cprio;
+	char name[XNOBJECT_NAME_LEN];
+	char personality[XNOBJECT_NAME_LEN];
+};
+
+#endif /* !_COBALT_UAPI_THREAD_H */
+++ linux-patched/include/xenomai/cobalt/kernel/schedparam.h	2022-03-21 12:58:31.742866016 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/include/xenomai/cobalt/kernel/vfile.h	1970-01-01 01:00:00.000000000 +0100
+/*
+ * Copyright (C) 2008 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * Xenomai is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published
+ * by the Free Software Foundation; either version 2 of the License,
+ * or (at your option) any later version.
+ *
+ * Xenomai is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Xenomai; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA
+ * 02111-1307, USA.
+ */
+#ifndef _COBALT_KERNEL_SCHEDPARAM_H
+#define _COBALT_KERNEL_SCHEDPARAM_H
+
+/**
+ * @addtogroup cobalt_core_sched
+ * @{
+ */
+
+struct xnsched_idle_param {
+	int prio;
+};
+
+struct xnsched_weak_param {
+	int prio;
+};
+
+struct xnsched_rt_param {
+	int prio;
+};
+
+struct xnsched_tp_param {
+	int prio;
+	int ptid;	/* partition id. */
+};
+
+struct xnsched_sporadic_param {
+	xnticks_t init_budget;
+	xnticks_t repl_period;
+	int max_repl;
+	int low_prio;
+	int normal_prio;
+	int current_prio;
+};
+
+struct xnsched_quota_param {
+	int prio;
+	int tgid;	/* thread group id. */
+};
+
+union xnsched_policy_param {
+	struct xnsched_idle_param idle;
+	struct xnsched_rt_param rt;
+#ifdef CONFIG_XENO_OPT_SCHED_WEAK
+	struct xnsched_weak_param weak;
+#endif
+#ifdef CONFIG_XENO_OPT_SCHED_TP
+	struct xnsched_tp_param tp;
+#endif
+#ifdef CONFIG_XENO_OPT_SCHED_SPORADIC
+	struct xnsched_sporadic_param pss;
+#endif
+#ifdef CONFIG_XENO_OPT_SCHED_QUOTA
+	struct xnsched_quota_param quota;
+#endif
+};
+
+/** @} */
+
+#endif /* !_COBALT_KERNEL_SCHEDPARAM_H */
+++ linux-patched/include/xenomai/cobalt/kernel/vfile.h	2022-03-21 12:58:31.735866084 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/include/xenomai/cobalt/kernel/synch.h	1970-01-01 01:00:00.000000000 +0100
+/*
+ * Copyright (C) 2010 Philippe Gerum <rpm@xenomai.org>
+ *
+ * Xenomai is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+#ifndef _COBALT_KERNEL_VFILE_H
+#define _COBALT_KERNEL_VFILE_H
+
+#if defined(CONFIG_XENO_OPT_VFILE) || defined(DOXYGEN_CPP)
+
+#include <linux/proc_fs.h>
+#include <linux/seq_file.h>
+#include <cobalt/kernel/lock.h>
+
+/**
+ * @addtogroup cobalt_core_vfile
+ * @{
+ */
+
+struct xnvfile_directory;
+struct xnvfile_regular_iterator;
+struct xnvfile_snapshot_iterator;
+struct xnvfile_lock_ops;
+
+struct xnvfile {
+	struct proc_dir_entry *pde;
+	struct file *file;
+	struct xnvfile_lock_ops *lockops;
+	int refcnt;
+	void *private;
+};
+
+/**
+ * @brief Vfile locking operations
+ * @anchor vfile_lockops
+ *
+ * This structure describes the operations to be provided for
+ * implementing locking support on vfiles. They apply to both
+ * snapshot-driven and regular vfiles.
+ */
+struct xnvfile_lock_ops {
+	/**
+	 * @anchor lockops_get
+	 * This handler should grab the desired lock.
+	 *
+	 * @param vfile A pointer to the virtual file which needs
+	 * locking.
+	 *
+	 * @return zero should be returned if the call
+	 * succeeds. Otherwise, a negative error code can be returned;
+	 * upon error, the current vfile operation is aborted, and the
+	 * user-space caller is passed back the error value.
+	 */
+	int (*get)(struct xnvfile *vfile);
+	/**
+	 * @anchor lockops_put This handler should release the lock
+	 * previously grabbed by the @ref lockops_get "get() handler".
+	 *
+	 * @param vfile A pointer to the virtual file which currently
+	 * holds the lock to release.
+	 */
+	void (*put)(struct xnvfile *vfile);
+};
+
+struct xnvfile_hostlock_class {
+	struct xnvfile_lock_ops ops;
+	struct mutex mutex;
+};
+
+struct xnvfile_nklock_class {
+	struct xnvfile_lock_ops ops;
+	spl_t s;
+};
+
+struct xnvfile_input {
+	const char __user *u_buf;
+	size_t size;
+	struct xnvfile *vfile;
+};
+
+/**
+ * @brief Regular vfile operation descriptor
+ * @anchor regular_ops
+ *
+ * This structure describes the operations available with a regular
+ * vfile. It defines handlers for sending back formatted kernel data
+ * upon a user-space read request, and for obtaining user data upon a
+ * user-space write request.
+ */
+struct xnvfile_regular_ops {
+	/**
+	 * @anchor regular_rewind This handler is called only once,
+	 * when the virtual file is opened, before the @ref
+	 * regular_begin "begin() handler" is invoked.
+	 *
+	 * @param it A pointer to the vfile iterator which will be
+	 * used to read the file contents.
+	 *
+	 * @return Zero should be returned upon success. Otherwise, a
+	 * negative error code aborts the operation, and is passed
+	 * back to the reader.
+	 *
+	 * @note This handler is optional. It should not be used to
+	 * allocate resources but rather to perform consistency
+	 * checks, since no closure call is issued in case the open
+	 * sequence eventually fails.
+	 */
+	int (*rewind)(struct xnvfile_regular_iterator *it);
+	/**
+	 * @anchor regular_begin
+	 * This handler should prepare for iterating over the records
+	 * upon a read request, starting from the specified position.
+	 *
+	 * @param it A pointer to the current vfile iterator. On
+	 * entry, it->pos is set to the (0-based) position of the
+	 * first record to output. This handler may be called multiple
+	 * times with different position requests.
+	 *
+	 * @return A pointer to the first record to format and output,
+	 * to be passed to the @ref regular_show "show() handler" as
+	 * its @a data parameter, if the call succeeds. Otherwise:
+	 *
+	 * - NULL in case no record is available, in which case the
+	 * read operation will terminate immediately with no output.
+	 *
+	 * - VFILE_SEQ_START, a special value indicating that @ref
+	 * regular_show "the show() handler" should receive a NULL
+	 * data pointer first, in order to output a header.
+	 *
+	 * - ERR_PTR(errno), where errno is a negative error code;
+	 * upon error, the current operation will be aborted
+	 * immediately.
+	 *
+	 * @note This handler is optional; if none is given in the
+	 * operation descriptor (i.e. NULL value), the @ref
+	 * regular_show "show() handler" will be called only once
+	 * for a read operation, with a NULL @a data parameter. This
+	 * particular setting is convenient for simple regular vfiles
+	 * having a single, fixed record to output.
+	 */
+	void *(*begin)(struct xnvfile_regular_iterator *it);
+	/**
+	 * @anchor regular_next
+	 * This handler should return the address of the next record
+	 * to format and output by the @ref regular_show "show()
+	 * handler".
+	 *
+	 * @param it A pointer to the current vfile iterator. On
+	 * entry, it->pos is set to the (0-based) position of the
+	 * next record to output.
+	 *
+	 * @return A pointer to the next record to format and output,
+	 * to be passed to the @ref regular_show "show() handler" as
+	 * its @a data parameter, if the call succeeds. Otherwise:
+	 *
+	 * - NULL in case no record is available, in which case the
+	 * read operation will terminate immediately with no output.
+	 *
+	 * - ERR_PTR(errno), where errno is a negative error code;
+	 * upon error, the current operation will be aborted
+	 * immediately.
+	 *
+	 * @note This handler is optional; if none is given in the
+	 * operation descriptor (i.e. NULL value), the read operation
+	 * will stop after the first invocation of the @ref regular_show
+	 * "show() handler".
+	 */
+	void *(*next)(struct xnvfile_regular_iterator *it);
+	/**
+	 * @anchor regular_end
+	 * This handler is called after all records have been output.
+	 *
+	 * @param it A pointer to the current vfile iterator.
+	 *
+	 * @note This handler is optional and the pointer may be NULL.
+	 */
+	void (*end)(struct xnvfile_regular_iterator *it);
+	/**
+	 * @anchor regular_show
+	 * This handler should format and output a record.
+	 *
+	 * xnvfile_printf(), xnvfile_write(), xnvfile_puts() and
+	 * xnvfile_putc() are available to format and/or emit the
+	 * output. All routines take the iterator argument @a it as
+	 * their first parameter.
+	 *
+	 * @param it A pointer to the current vfile iterator.
+	 *
+	 * @param data A pointer to the record to format then
+	 * output. The first call to the handler may receive a NULL @a
+	 * data pointer, depending on the presence and return value of the
+	 * @ref regular_begin "begin() handler"; the show handler should test
+	 * this special value to output any header that fits, prior to
+	 * receiving more calls with actual records.
+	 *
+	 * @return zero if the call succeeds, also indicating that the
+	 * handler should be called for the next record if
+	 * any. Otherwise:
+	 *
+	 * - A negative error code. This will abort the output phase,
+	 * and return this status to the reader.
+	 *
+	 * - VFILE_SEQ_SKIP, a special value indicating that the
+	 * current record should be skipped and will not be output.
+	 */
+	int (*show)(struct xnvfile_regular_iterator *it, void *data);
+	/**
+	 * @anchor regular_store
+	 * This handler receives data written to the vfile, likely for
+	 * updating some kernel setting, or triggering any other
+	 * action which fits. This is the only handler which deals
+	 * with the write-side of a vfile.  It is called when writing
+	 * to the /proc entry of the vfile from a user-space process.
+	 *
+	 * The input data is described by a descriptor passed to the
+	 * handler, which may be subsequently passed to parsing helper
+	 * routines.  For instance, xnvfile_get_string() will accept
+	 * the input descriptor for returning the written data as a
+	 * null-terminated character string. On the other hand,
+	 * xnvfile_get_integer() will attempt to return a long integer
+	 * from the input data.
+	 *
+	 * @param input A pointer to an input descriptor. It refers to
+	 * opaque data from the handler's standpoint.
+	 *
+	 * @return the number of bytes read from the input descriptor
+	 * if the call succeeds. Otherwise, a negative error code.
+	 * Return values from parsing helper routines are commonly
+	 * passed back to the caller by the @ref regular_store
+	 * "store() handler".
+	 *
+	 * @note This handler is optional, and may be omitted for
+	 * read-only vfiles.
+	 */
+	ssize_t (*store)(struct xnvfile_input *input);
+};
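+
+/*
+ * A minimal sketch of a single-record regular vfile (hypothetical
+ * names, for illustration only; foo_value stands for some kernel
+ * state): with no begin()/next() handlers, show() runs once with a
+ * NULL data pointer, and store() parses an integer written to the
+ * /proc entry with xnvfile_get_integer().
+ *
+ *	static int foo_vfile_show(struct xnvfile_regular_iterator *it,
+ *				  void *data)
+ *	{
+ *		xnvfile_printf(it, "%d\n", foo_value);
+ *		return 0;
+ *	}
+ *
+ *	static ssize_t foo_vfile_store(struct xnvfile_input *input)
+ *	{
+ *		long val;
+ *		ssize_t ret;
+ *
+ *		ret = xnvfile_get_integer(input, &val);
+ *		if (ret < 0)
+ *			return ret;
+ *
+ *		foo_value = (int)val;
+ *		return ret;
+ *	}
+ *
+ *	static struct xnvfile_regular_ops foo_vfile_ops = {
+ *		.show = foo_vfile_show,
+ *		.store = foo_vfile_store,
+ *	};
+ */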
+
+struct xnvfile_regular {
+	struct xnvfile entry;
+	size_t privsz;
+	struct xnvfile_regular_ops *ops;
+};
+
+struct xnvfile_regular_template {
+	size_t privsz;
+	struct xnvfile_regular_ops *ops;
+	struct xnvfile_lock_ops *lockops;
+};
+
+/**
+ * @brief Regular vfile iterator
+ * @anchor regular_iterator
+ *
+ * This structure defines an iterator over a regular vfile.
+ */
+struct xnvfile_regular_iterator {
+	/** Current record position while iterating. */
+	loff_t pos;
+	/** Backlink to the host sequential file supporting the vfile. */
+	struct seq_file *seq;
+	/** Backlink to the vfile being read. */
+	struct xnvfile_regular *vfile;
+	/**
+	 * Start of private area. Use xnvfile_iterator_priv() to
+	 * address it.
+	 */
+	char private[0];
+};
+
+/**
+ * @brief Snapshot vfile operation descriptor
+ * @anchor snapshot_ops
+ *
+ * This structure describes the operations available with a
+ * snapshot-driven vfile. It defines handlers for returning a
+ * printable snapshot of some Xenomai object contents upon a
+ * user-space read request, and for updating this object upon a
+ * user-space write request.
+ */
+struct xnvfile_snapshot_ops {
+	/**
+	 * @anchor snapshot_rewind
+	 * This handler (re-)initializes the data collection, moving
+	 * the seek pointer at the first record. When the file
+	 * revision tag is touched while collecting data, the current
+	 * reading is aborted, all collected data dropped, and the
+	 * vfile is eventually rewound.
+	 *
+	 * @param it A pointer to the current snapshot iterator. Two
+	 * useful pieces of information can be retrieved from this
+	 * iterator in this context:
+	 *
+	 * - it->vfile is a pointer to the descriptor of the virtual
+	 * file being rewound.
+	 *
+	 * - xnvfile_iterator_priv(it) returns a pointer to the
+	 * private data area, available from the descriptor, whose
+	 * size is vfile->privsz. If the latter size is zero, the
+	 * returned pointer is meaningless and should not be used.
+	 *
+	 * @return A negative error code aborts the data collection,
+	 * and is passed back to the reader. Otherwise:
+	 *
+	 * - a strictly positive value is interpreted as the total
+	 * number of records which will be returned by the @ref
+	 * snapshot_next "next() handler" during the data collection
+	 * phase. If no @ref snapshot_begin "begin() handler" is
+	 * provided in the @ref snapshot_ops "operation descriptor",
+	 * this value is used to allocate the snapshot buffer
+	 * internally. The size of this buffer would then be
+	 * vfile->datasz * value.
+	 *
+	 * - zero leaves the allocation to the @ref snapshot_begin
+	 * "begin() handler" if present, or indicates that no record
+	 * is to be output in case such handler is not given.
+	 *
+	 * @note This handler is optional; a NULL value indicates that
+	 * nothing needs to be done for rewinding the vfile.  It is
+	 * called with the vfile lock held.
+	 */
+	int (*rewind)(struct xnvfile_snapshot_iterator *it);
+	/**
+	 * @anchor snapshot_begin
+	 * This handler should allocate the snapshot buffer to hold
+	 * records during the data collection phase.  When specified,
+	 * all records collected via the @ref snapshot_next "next()
+	 * handler" will be written to a cell from the memory area
+	 * returned by begin().
+	 *
+	 * @param it A pointer to the current snapshot iterator.
+	 *
+	 * @return A pointer to the record buffer, if the call
+	 * succeeds. Otherwise:
+	 *
+	 * - NULL in case of allocation error. This will abort the data
+	 * collection, and return -ENOMEM to the reader.
+	 *
+	 * - VFILE_SEQ_EMPTY, a special value indicating that no
+	 * record will be output. In such a case, the @ref
+	 * snapshot_next "next() handler" will not be called, and the
+	 * data collection will stop immediately. However, the @ref
+	 * snapshot_show "show() handler" will still be called once,
+	 * with a NULL data pointer (i.e. header display request).
+	 *
+	 * @note This handler is optional; if none is given, the
+	 * snapshot buffer is allocated internally, based on the value
+	 * returned by the @ref snapshot_rewind "rewind() handler".
+	 */
+	void *(*begin)(struct xnvfile_snapshot_iterator *it);
+	/**
+	 * @anchor snapshot_end
+	 * This handler releases the memory buffer previously obtained
+	 * from begin(). It is usually called after the snapshot data
+	 * has been output by show(), but it may also be called before
+	 * rewinding the vfile after a revision change, to release the
+	 * dropped buffer.
+	 *
+	 * @param it A pointer to the current snapshot iterator.
+	 *
+	 * @param buf A pointer to the buffer to release.
+	 *
+	 * @note This routine is optional and the pointer may be
+	 * NULL. It is not needed upon internal buffer allocation;
+	 * see the description of the @ref snapshot_rewind "rewind()
+	 * handler".
+	 */
+	void (*end)(struct xnvfile_snapshot_iterator *it, void *buf);
+	/**
+	 * @anchor snapshot_next
+	 * This handler fetches the next record, as part of the
+	 * snapshot data to be sent back to the reader via the
+	 * @ref snapshot_show "show() handler".
+	 *
+	 * @param it A pointer to the current snapshot iterator.
+	 *
+	 * @param data A pointer to the record to fill in.
+	 *
+	 * @return a strictly positive value, if the call succeeds and
+	 * leaves a valid record into @a data, which should be passed
+	 * to the @ref snapshot_show "show() handler" during the
+	 * formatting and output phase. Otherwise:
+	 *
+	 * - A negative error code. This will abort the data
+	 * collection, and return this status to the reader.
+	 *
+	 * - VFILE_SEQ_SKIP, a special value indicating that the
+	 * current record should be skipped. In such a case, the @a
+	 * data pointer is not advanced to the next position before
+	 * the @ref snapshot_next "next() handler" is called anew.
+	 *
+	 * @note This handler is called with the vfile lock
+	 * held. Before each invocation of this handler, the vfile
+	 * core checks whether the revision tag has been touched, in
+	 * which case the data collection is restarted from scratch. A
+	 * data collection phase succeeds whenever all records can be
+	 * fetched via the @ref snapshot_next "next() handler", while
+	 * the revision tag remains unchanged, which indicates that a
+	 * consistent snapshot of the object state was taken.
+	 */
+	int (*next)(struct xnvfile_snapshot_iterator *it, void *data);
+	/**
+	 * @anchor snapshot_show
+	 * This handler should format and output a record from the
+	 * collected data.
+	 *
+	 * xnvfile_printf(), xnvfile_write(), xnvfile_puts() and
+	 * xnvfile_putc() are available to format and/or emit the
+	 * output. All routines take the iterator argument @a it as
+	 * their first parameter.
+	 *
+	 * @param it A pointer to the current snapshot iterator.
+	 *
+	 * @param data A pointer to the record to format then
+	 * output. The first call to the handler is always passed a
+	 * NULL @a data pointer; the show handler should test this
+	 * special value to output any header that fits, prior to
+	 * receiving more calls with actual records.
+	 *
+	 * @return zero if the call succeeds, also indicating that the
+	 * handler should be called for the next record if
+	 * any. Otherwise:
+	 *
+	 * - A negative error code. This will abort the output phase,
+	 * and return this status to the reader.
+	 *
+	 * - VFILE_SEQ_SKIP, a special value indicating that the
+	 * current record should be skipped and will not be output.
+	 */
+	int (*show)(struct xnvfile_snapshot_iterator *it, void *data);
+	/**
+	 * @anchor snapshot_store
+	 * This handler receives data written to the vfile, likely for
+	 * updating the associated Xenomai object's state, or
+	 * triggering any other action which fits. This is the only
+	 * handler which deals with the write-side of a vfile.  It is
+	 * called when writing to the /proc entry of the vfile
+	 * from a user-space process.
+	 *
+	 * The input data is described by a descriptor passed to the
+	 * handler, which may be subsequently passed to parsing helper
+	 * routines.  For instance, xnvfile_get_string() will accept
+	 * the input descriptor for returning the written data as a
+	 * null-terminated character string. On the other hand,
+	 * xnvfile_get_integer() will attempt to return a long integer
+	 * from the input data.
+	 *
+	 * @param input A pointer to an input descriptor. It refers to
+	 * opaque data from the handler's standpoint.
+	 *
+	 * @return the number of bytes read from the input descriptor
+	 * if the call succeeds. Otherwise, a negative error code.
+	 * Return values from parsing helper routines are commonly
+	 * passed back to the caller by the @ref snapshot_store
+	 * "store() handler".
+	 *
+	 * @note This handler is optional, and may be omitted for
+	 * read-only vfiles.
+	 */
+	ssize_t (*store)(struct xnvfile_input *input);
+};
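+
+/*
+ * A schematic data collection sketch (hypothetical names; locking
+ * and error handling omitted): rewind() announces how many records
+ * will be collected so the core can size the snapshot buffer
+ * (vfile->datasz * count), next() fills one record cell per call and
+ * returns a positive value, and show() formats a header first, then
+ * each collected record.
+ *
+ *	static int foo_rewind(struct xnvfile_snapshot_iterator *it)
+ *	{
+ *		return foo_count;
+ *	}
+ *
+ *	static int foo_next(struct xnvfile_snapshot_iterator *it, void *data)
+ *	{
+ *		struct foo_record *p = data;
+ *
+ *		foo_collect_one(p);
+ *
+ *		return 1;
+ *	}
+ *
+ *	static int foo_show(struct xnvfile_snapshot_iterator *it, void *data)
+ *	{
+ *		struct foo_record *p = data;
+ *
+ *		if (p == NULL)
+ *			xnvfile_printf(it, "%-10s %-10s\n", "NAME", "VALUE");
+ *		else
+ *			xnvfile_printf(it, "%-10s %-10d\n", p->name, p->value);
+ *
+ *		return 0;
+ *	}
+ */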
+
+/**
+ * @brief Snapshot revision tag
+ * @anchor revision_tag
+ *
+ * This structure defines a revision tag to be used with @ref
+ * snapshot_vfile "snapshot-driven vfiles".
+ */
+struct xnvfile_rev_tag {
+	/** Current revision number. */
+	int rev;
+};
+
+struct xnvfile_snapshot_template {
+	size_t privsz;
+	size_t datasz;
+	struct xnvfile_rev_tag *tag;
+	struct xnvfile_snapshot_ops *ops;
+	struct xnvfile_lock_ops *lockops;
+};
+
+/**
+ * @brief Snapshot vfile descriptor
+ * @anchor snapshot_vfile
+ *
+ * This structure describes a snapshot-driven vfile.  Reading from
+ * such a vfile involves a preliminary data collection phase under
+ * lock protection, and a subsequent formatting and output phase of
+ * the collected data records. Locking is done in a way that does not
+ * increase worst-case latency, regardless of the number of records to
+ * be collected for output.
+ */
+struct xnvfile_snapshot {
+	struct xnvfile entry;
+	size_t privsz;
+	size_t datasz;
+	struct xnvfile_rev_tag *tag;
+	struct xnvfile_snapshot_ops *ops;
+};
+
+/**
+ * @brief Snapshot-driven vfile iterator
+ * @anchor snapshot_iterator
+ *
+ * This structure defines an iterator over a snapshot-driven vfile.
+ */
+struct xnvfile_snapshot_iterator {
+	/** Number of collected records. */
+	int nrdata;
+	/** Address of record buffer. */
+	caddr_t databuf;
+	/** Backlink to the host sequential file supporting the vfile. */
+	struct seq_file *seq;
+	/** Backlink to the vfile being read. */
+	struct xnvfile_snapshot *vfile;
+	/** Buffer release handler. */
+	void (*endfn)(struct xnvfile_snapshot_iterator *it, void *buf);
+	/**
+	 * Start of private area. Use xnvfile_iterator_priv() to
+	 * address it.
+	 */
+	char private[0];
+};
+
+struct xnvfile_directory {
+	struct xnvfile entry;
+};
+
+struct xnvfile_link {
+	struct xnvfile entry;
+};
+
+/* vfile.begin()=> */
+#define VFILE_SEQ_EMPTY			((void *)-1)
+/* =>vfile.show() */
+#define VFILE_SEQ_START			SEQ_START_TOKEN
+/* vfile.next/show()=> */
+#define VFILE_SEQ_SKIP			2
+
+#define xnvfile_printf(it, args...)	seq_printf((it)->seq, ##args)
+#define xnvfile_write(it, data, len)	seq_write((it)->seq, (data),(len))
+#define xnvfile_puts(it, s)		seq_puts((it)->seq, (s))
+#define xnvfile_putc(it, c)		seq_putc((it)->seq, (c))
+
+static inline void xnvfile_touch_tag(struct xnvfile_rev_tag *tag)
+{
+	tag->rev++;
+}
+
+static inline void xnvfile_touch(struct xnvfile_snapshot *vfile)
+{
+	xnvfile_touch_tag(vfile->tag);
+}
+
+#define xnvfile_noentry			\
+	{				\
+		.pde = NULL,		\
+		.private = NULL,	\
+		.file = NULL,		\
+		.refcnt = 0,		\
+	}
+
+#define xnvfile_nodir	{ .entry = xnvfile_noentry }
+#define xnvfile_nolink	{ .entry = xnvfile_noentry }
+#define xnvfile_nofile	{ .entry = xnvfile_noentry }
+
+#define xnvfile_priv(e)			((e)->entry.private)
+#define xnvfile_nref(e)			((e)->entry.refcnt)
+#define xnvfile_file(e)			((e)->entry.file)
+#define xnvfile_iterator_priv(it)	((void *)(&(it)->private))
+
+extern struct xnvfile_nklock_class xnvfile_nucleus_lock;
+
+extern struct xnvfile_directory cobalt_vfroot;
+
+int xnvfile_init_root(void);
+
+void xnvfile_destroy_root(void);
+
+int xnvfile_init_snapshot(const char *name,
+			  struct xnvfile_snapshot *vfile,
+			  struct xnvfile_directory *parent);
+
+int xnvfile_init_regular(const char *name,
+			 struct xnvfile_regular *vfile,
+			 struct xnvfile_directory *parent);
+
+int xnvfile_init_dir(const char *name,
+		     struct xnvfile_directory *vdir,
+		     struct xnvfile_directory *parent);
+
+int xnvfile_init_link(const char *from,
+		      const char *to,
+		      struct xnvfile_link *vlink,
+		      struct xnvfile_directory *parent);
+
+void xnvfile_destroy(struct xnvfile *vfile);
+
+ssize_t xnvfile_get_blob(struct xnvfile_input *input,
+			 void *data, size_t size);
+
+ssize_t xnvfile_get_string(struct xnvfile_input *input,
+			   char *s, size_t maxlen);
+
+ssize_t xnvfile_get_integer(struct xnvfile_input *input, long *valp);
+
+int __vfile_hostlock_get(struct xnvfile *vfile);
+
+void __vfile_hostlock_put(struct xnvfile *vfile);
+
+static inline
+void xnvfile_destroy_snapshot(struct xnvfile_snapshot *vfile)
+{
+	xnvfile_destroy(&vfile->entry);
+}
+
+static inline
+void xnvfile_destroy_regular(struct xnvfile_regular *vfile)
+{
+	xnvfile_destroy(&vfile->entry);
+}
+
+static inline
+void xnvfile_destroy_dir(struct xnvfile_directory *vdir)
+{
+	xnvfile_destroy(&vdir->entry);
+}
+
+static inline
+void xnvfile_destroy_link(struct xnvfile_link *vlink)
+{
+	xnvfile_destroy(&vlink->entry);
+}
+
+#define DEFINE_VFILE_HOSTLOCK(name)					\
+	struct xnvfile_hostlock_class name = {				\
+		.ops = {						\
+			.get = __vfile_hostlock_get,			\
+			.put = __vfile_hostlock_put,			\
+		},							\
+		.mutex = __MUTEX_INITIALIZER(name.mutex),		\
+	}
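+
+/*
+ * Putting a regular vfile together (hypothetical names; error
+ * handling omitted, and foo_vfile_ops is assumed to be a populated
+ * struct xnvfile_regular_ops): a host mutex serializes accesses, and
+ * the entry is created under the Cobalt vfile root, then dismantled
+ * with xnvfile_destroy_regular().
+ *
+ *	static DEFINE_VFILE_HOSTLOCK(foo_vfile_lock);
+ *
+ *	static struct xnvfile_regular foo_vfile = {
+ *		.ops = &foo_vfile_ops,
+ *		.entry = { .lockops = &foo_vfile_lock.ops },
+ *	};
+ *
+ *	ret = xnvfile_init_regular("foo", &foo_vfile, &cobalt_vfroot);
+ *	...
+ *	xnvfile_destroy_regular(&foo_vfile);
+ */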
+
+#else /* !CONFIG_XENO_OPT_VFILE */
+
+#define xnvfile_touch_tag(tag)	do { } while (0)
+
+#define xnvfile_touch(vfile)	do { } while (0)
+
+#endif /* !CONFIG_XENO_OPT_VFILE */
+
+/** @} */
+
+#endif /* !_COBALT_KERNEL_VFILE_H */
+++ linux-patched/include/xenomai/cobalt/kernel/synch.h	2022-03-21 12:58:31.727866163 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/include/xenomai/cobalt/kernel/sched.h	1970-01-01 01:00:00.000000000 +0100
+/*
+ * Copyright (C) 2001,2002,2003 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * Xenomai is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published
+ * by the Free Software Foundation; either version 2 of the License,
+ * or (at your option) any later version.
+ *
+ * Xenomai is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Xenomai; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA
+ * 02111-1307, USA.
+ */
+#ifndef _COBALT_KERNEL_SYNCH_H
+#define _COBALT_KERNEL_SYNCH_H
+
+#include <cobalt/kernel/list.h>
+#include <cobalt/kernel/assert.h>
+#include <cobalt/kernel/timer.h>
+#include <cobalt/uapi/kernel/synch.h>
+#include <cobalt/uapi/kernel/thread.h>
+
+/**
+ * @addtogroup cobalt_core_synch
+ * @{
+ */
+#define XNSYNCH_CLAIMED  0x100	/* Claimed by other thread(s) (PI) */
+#define XNSYNCH_CEILING  0x200	/* Actively boosting (PP) */
+
+/* Spare flags usable by upper interfaces */
+#define XNSYNCH_SPARE0  0x01000000
+#define XNSYNCH_SPARE1  0x02000000
+#define XNSYNCH_SPARE2  0x04000000
+#define XNSYNCH_SPARE3  0x08000000
+#define XNSYNCH_SPARE4  0x10000000
+#define XNSYNCH_SPARE5  0x20000000
+#define XNSYNCH_SPARE6  0x40000000
+#define XNSYNCH_SPARE7  0x80000000
+
+/* Statuses */
+#define XNSYNCH_DONE    0	/* Resource available / operation complete */
+#define XNSYNCH_WAIT    1	/* Calling thread blocked -- start rescheduling */
+#define XNSYNCH_RESCHED 2	/* Force rescheduling */
+
+struct xnthread;
+struct xnsynch;
+
+struct xnsynch {
+	/** wait (weighted) prio in thread->boosters */
+	int wprio;
+	/** thread->boosters */
+	struct list_head next;
+	/**
+	 *  &variable holding the current priority ceiling value
+	 *  (xnsched_class_rt-based, [1..255], XNSYNCH_PP).
+	 */
+	u32 *ceiling_ref;
+	/** Status word */
+	unsigned long status;
+	/** Pending threads */
+	struct list_head pendq;
+	/** Thread which owns the resource */
+	struct xnthread *owner;
+	/** Pointer to fast lock word */
+	atomic_t *fastlock;
+	/* Cleanup handler */
+	void (*cleanup)(struct xnsynch *synch);
+};
+
+#define XNSYNCH_WAITQUEUE_INITIALIZER(__name) {		\
+		.status = XNSYNCH_PRIO,			\
+		.wprio = -1,				\
+		.pendq = LIST_HEAD_INIT((__name).pendq),	\
+		.owner = NULL,				\
+		.cleanup = NULL,			\
+		.fastlock = NULL,			\
+	}
+
+#define DEFINE_XNWAITQ(__name)	\
+	struct xnsynch __name = XNSYNCH_WAITQUEUE_INITIALIZER(__name)
+
+static inline void xnsynch_set_status(struct xnsynch *synch, int bits)
+{
+	synch->status |= bits;
+}
+
+static inline void xnsynch_clear_status(struct xnsynch *synch, int bits)
+{
+	synch->status &= ~bits;
+}
+
+#define xnsynch_for_each_sleeper(__pos, __synch)		\
+	list_for_each_entry(__pos, &(__synch)->pendq, plink)
+
+#define xnsynch_for_each_sleeper_safe(__pos, __tmp, __synch)	\
+	list_for_each_entry_safe(__pos, __tmp, &(__synch)->pendq, plink)
+
+static inline int xnsynch_pended_p(struct xnsynch *synch)
+{
+	return !list_empty(&synch->pendq);
+}
+
+static inline struct xnthread *xnsynch_owner(struct xnsynch *synch)
+{
+	return synch->owner;
+}
+
+#define xnsynch_fastlock(synch)		((synch)->fastlock)
+#define xnsynch_fastlock_p(synch)	((synch)->fastlock != NULL)
+#define xnsynch_owner_check(synch, thread) \
+	xnsynch_fast_owner_check((synch)->fastlock, thread->handle)
+
+#ifdef CONFIG_XENO_OPT_DEBUG_MUTEX_RELAXED
+
+void xnsynch_detect_relaxed_owner(struct xnsynch *synch,
+				  struct xnthread *sleeper);
+
+void xnsynch_detect_boosted_relax(struct xnthread *owner);
+
+#else /* !CONFIG_XENO_OPT_DEBUG_MUTEX_RELAXED */
+
+static inline void xnsynch_detect_relaxed_owner(struct xnsynch *synch,
+				  struct xnthread *sleeper) { }
+
+static inline void xnsynch_detect_boosted_relax(struct xnthread *owner) { }
+
+#endif /* !CONFIG_XENO_OPT_DEBUG_MUTEX_RELAXED */
+
+void xnsynch_init(struct xnsynch *synch, int flags,
+		  atomic_t *fastlock);
+
+void xnsynch_init_protect(struct xnsynch *synch, int flags,
+			  atomic_t *fastlock, u32 *ceiling_ref);
+
+int xnsynch_destroy(struct xnsynch *synch);
+
+void xnsynch_commit_ceiling(struct xnthread *curr);
+
+static inline void xnsynch_register_cleanup(struct xnsynch *synch,
+					    void (*handler)(struct xnsynch *))
+{
+	synch->cleanup = handler;
+}
+
+int __must_check xnsynch_sleep_on(struct xnsynch *synch,
+				  xnticks_t timeout,
+				  xntmode_t timeout_mode);
+
+struct xnthread *xnsynch_wakeup_one_sleeper(struct xnsynch *synch);
+
+int xnsynch_wakeup_many_sleepers(struct xnsynch *synch, int nr);
+
+void xnsynch_wakeup_this_sleeper(struct xnsynch *synch,
+				 struct xnthread *sleeper);
+
+int __must_check xnsynch_acquire(struct xnsynch *synch,
+				 xnticks_t timeout,
+				 xntmode_t timeout_mode);
+
+int __must_check xnsynch_try_acquire(struct xnsynch *synch);
+
+bool xnsynch_release(struct xnsynch *synch, struct xnthread *thread);
+
+struct xnthread *xnsynch_peek_pendq(struct xnsynch *synch);
+
+int xnsynch_flush(struct xnsynch *synch, int reason);
+
+void xnsynch_requeue_sleeper(struct xnthread *thread);
+
+void xnsynch_forget_sleeper(struct xnthread *thread);
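+
+/*
+ * A schematic usage sketch (hypothetical names; the flag combination,
+ * calling context and error handling are simplified, see
+ * cobalt/uapi/kernel/synch.h for the actual flag set, and curr stands
+ * for the current Cobalt thread): an ownership-tracking object is
+ * initialized over a fast lock word, acquired with a relative
+ * timeout, then released by its owner.
+ *
+ *	static struct xnsynch foo_synch;
+ *	static atomic_t foo_fastlock = ATOMIC_INIT(0);
+ *
+ *	xnsynch_init(&foo_synch, XNSYNCH_PRIO | XNSYNCH_OWNER, &foo_fastlock);
+ *
+ *	ret = xnsynch_acquire(&foo_synch, timeout_ns, XN_RELATIVE);
+ *	...
+ *	xnsynch_release(&foo_synch, curr);
+ *
+ *	xnsynch_destroy(&foo_synch);
+ */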
+
+/** @} */
+
+#endif /* !_COBALT_KERNEL_SYNCH_H */
+++ linux-patched/include/xenomai/cobalt/kernel/sched.h	2022-03-21 12:58:31.720866231 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/include/xenomai/cobalt/kernel/map.h	1970-01-01 01:00:00.000000000 +0100
+/*
+ * Copyright (C) 2008 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * Xenomai is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published
+ * by the Free Software Foundation; either version 2 of the License,
+ * or (at your option) any later version.
+ *
+ * Xenomai is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Xenomai; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA
+ * 02111-1307, USA.
+ */
+#ifndef _COBALT_KERNEL_SCHED_H
+#define _COBALT_KERNEL_SCHED_H
+
+#include <linux/percpu.h>
+#include <cobalt/kernel/lock.h>
+#include <cobalt/kernel/thread.h>
+#include <cobalt/kernel/schedqueue.h>
+#include <cobalt/kernel/sched-tp.h>
+#include <cobalt/kernel/sched-weak.h>
+#include <cobalt/kernel/sched-sporadic.h>
+#include <cobalt/kernel/sched-quota.h>
+#include <cobalt/kernel/vfile.h>
+#include <cobalt/kernel/assert.h>
+#include <asm/xenomai/machine.h>
+#include <pipeline/sched.h>
+
+/**
+ * @addtogroup cobalt_core_sched
+ * @{
+ */
+
+/* Sched status flags */
+#define XNRESCHED	0x10000000	/* Needs rescheduling */
+#define XNINSW		0x20000000	/* In context switch */
+#define XNINTCK		0x40000000	/* In master tick handler context */
+
+/* Sched local flags */
+#define XNIDLE		0x00010000	/* Idle (no outstanding timer) */
+#define XNHTICK		0x00008000	/* Host tick pending  */
+#define XNINIRQ		0x00004000	/* In IRQ handling context */
+#define XNHDEFER	0x00002000	/* Host tick deferred */
+
+/*
+ * Hardware timer is stopped.
+ */
+#define XNTSTOP		0x00000800
+
+struct xnsched_rt {
+	xnsched_queue_t runnable;	/*!< Runnable thread queue. */
+};
+
+/*!
+ * \brief Scheduling information structure.
+ */
+
+struct xnsched {
+	/*!< Scheduler specific status bitmask. */
+	unsigned long status;
+	/*!< Scheduler specific local flags bitmask. */
+	unsigned long lflags;
+	/*!< Current thread. */
+	struct xnthread *curr;
+#ifdef CONFIG_SMP
+	/*!< Owner CPU id. */
+	int cpu;
+	/*!< Mask of CPUs needing rescheduling. */
+	cpumask_t resched;
+#endif
+	/*!< Context of built-in real-time class. */
+	struct xnsched_rt rt;
+#ifdef CONFIG_XENO_OPT_SCHED_WEAK
+	/*!< Context of weak scheduling class. */
+	struct xnsched_weak weak;
+#endif
+#ifdef CONFIG_XENO_OPT_SCHED_TP
+	/*!< Context of TP class. */
+	struct xnsched_tp tp;
+#endif
+#ifdef CONFIG_XENO_OPT_SCHED_SPORADIC
+	/*!< Context of sporadic scheduling class. */
+	struct xnsched_sporadic pss;
+#endif
+#ifdef CONFIG_XENO_OPT_SCHED_QUOTA
+	/*!< Context of runtime quota scheduling. */
+	struct xnsched_quota quota;
+#endif
+	/*!< Interrupt nesting level. */
+	volatile unsigned inesting;
+	/*!< Host timer. */
+	struct xntimer htimer;
+	/*!< Round-robin timer. */
+	struct xntimer rrbtimer;
+	/*!< Root thread control block. */
+	struct xnthread rootcb;
+#ifdef CONFIG_XENO_ARCH_FPU
+	/*!< Thread owning the current FPU context. */
+	struct xnthread *fpuholder;
+#endif
+#ifdef CONFIG_XENO_OPT_WATCHDOG
+	/*!< Watchdog timer object. */
+	struct xntimer wdtimer;
+#endif
+#ifdef CONFIG_XENO_OPT_STATS
+	/*!< Last account switch date (ticks). */
+	xnticks_t last_account_switch;
+	/*!< Currently active account */
+	xnstat_exectime_t *current_account;
+#endif
+};
+
+DECLARE_PER_CPU(struct xnsched, nksched);
+
+extern cpumask_t cobalt_cpu_affinity;
+
+extern struct list_head nkthreadq;
+
+extern int cobalt_nrthreads;
+
+#ifdef CONFIG_XENO_OPT_VFILE
+extern struct xnvfile_rev_tag nkthreadlist_tag;
+#endif
+
+union xnsched_policy_param;
+
+struct xnsched_class {
+	void (*sched_init)(struct xnsched *sched);
+	void (*sched_enqueue)(struct xnthread *thread);
+	void (*sched_dequeue)(struct xnthread *thread);
+	void (*sched_requeue)(struct xnthread *thread);
+	struct xnthread *(*sched_pick)(struct xnsched *sched);
+	void (*sched_tick)(struct xnsched *sched);
+	void (*sched_rotate)(struct xnsched *sched,
+			     const union xnsched_policy_param *p);
+	void (*sched_migrate)(struct xnthread *thread,
+			      struct xnsched *sched);
+	int (*sched_chkparam)(struct xnthread *thread,
+			      const union xnsched_policy_param *p);
+	/**
+	 * Set base scheduling parameters. This routine is indirectly
+	 * called upon a change of base scheduling settings through
+	 * __xnthread_set_schedparam() -> xnsched_set_policy(),
+	 * exclusively.
+	 *
+	 * The scheduling class implementation should do the necessary
+	 * housekeeping to comply with the new settings.
+	 * thread->base_class is up to date before the call is made,
+	 * and should be considered for the new weighted priority
+	 * calculation. On the contrary, thread->sched_class should
+	 * NOT be referred to by this handler.
+	 *
+	 * sched_setparam() is NEVER involved in PI or PP
+	 * management. However it must deny a priority update if it
+	 * contradicts an ongoing boost for @a thread. This is
+	 * typically what the xnsched_set_effective_priority() helper
+	 * does for such a handler.
+	 *
+	 * @param thread Affected thread.
+	 * @param p New base policy settings.
+	 *
+	 * @return True if the effective priority was updated
+	 * (thread->cprio).
+	 */
+	bool (*sched_setparam)(struct xnthread *thread,
+			       const union xnsched_policy_param *p);
+	void (*sched_getparam)(struct xnthread *thread,
+			       union xnsched_policy_param *p);
+	void (*sched_trackprio)(struct xnthread *thread,
+				const union xnsched_policy_param *p);
+	void (*sched_protectprio)(struct xnthread *thread, int prio);
+	int (*sched_declare)(struct xnthread *thread,
+			     const union xnsched_policy_param *p);
+	void (*sched_forget)(struct xnthread *thread);
+	void (*sched_kick)(struct xnthread *thread);
+#ifdef CONFIG_XENO_OPT_VFILE
+	int (*sched_init_vfile)(struct xnsched_class *schedclass,
+				struct xnvfile_directory *vfroot);
+	void (*sched_cleanup_vfile)(struct xnsched_class *schedclass);
+#endif
+	int nthreads;
+	struct xnsched_class *next;
+	int weight;
+	int policy;
+	const char *name;
+};
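+
+/*
+ * A setparam handler sketch (hypothetical class reusing the rt
+ * parameter layout): the effective priority is updated through
+ * xnsched_set_effective_priority(), which also denies the change
+ * when it would contradict an ongoing boost, as required by the
+ * contract described above.
+ *
+ *	static bool foo_setparam(struct xnthread *thread,
+ *				 const union xnsched_policy_param *p)
+ *	{
+ *		return xnsched_set_effective_priority(thread, p->rt.prio);
+ *	}
+ */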
+
+#define XNSCHED_CLASS_WEIGHT(n)		(n * XNSCHED_CLASS_WEIGHT_FACTOR)
+
+/* Placeholder for current thread priority */
+#define XNSCHED_RUNPRIO   0x80000000
+
+#define xnsched_for_each_thread(__thread)	\
+	list_for_each_entry(__thread, &nkthreadq, glink)
+
+#ifdef CONFIG_SMP
+static inline int xnsched_cpu(struct xnsched *sched)
+{
+	return sched->cpu;
+}
+#else /* !CONFIG_SMP */
+static inline int xnsched_cpu(struct xnsched *sched)
+{
+	return 0;
+}
+#endif /* CONFIG_SMP */
+
+static inline struct xnsched *xnsched_struct(int cpu)
+{
+	return &per_cpu(nksched, cpu);
+}
+
+static inline struct xnsched *xnsched_current(void)
+{
+	/* IRQs off */
+	return raw_cpu_ptr(&nksched);
+}
+
+static inline struct xnthread *xnsched_current_thread(void)
+{
+	return xnsched_current()->curr;
+}
+
+/* Test resched flag of given sched. */
+static inline int xnsched_resched_p(struct xnsched *sched)
+{
+	return sched->status & XNRESCHED;
+}
+
+/* Set self resched flag for the current scheduler. */
+static inline void xnsched_set_self_resched(struct xnsched *sched)
+{
+	sched->status |= XNRESCHED;
+}
+
+/* Set resched flag for the given scheduler. */
+#ifdef CONFIG_SMP
+
+static inline void xnsched_set_resched(struct xnsched *sched)
+{
+	struct xnsched *current_sched = xnsched_current();
+
+	if (current_sched == sched)
+		current_sched->status |= XNRESCHED;
+	else if (!xnsched_resched_p(sched)) {
+		cpumask_set_cpu(xnsched_cpu(sched), &current_sched->resched);
+		sched->status |= XNRESCHED;
+		current_sched->status |= XNRESCHED;
+	}
+}
+
+#define xnsched_realtime_cpus    cobalt_pipeline.supported_cpus
+
+static inline int xnsched_supported_cpu(int cpu)
+{
+	return cpumask_test_cpu(cpu, &xnsched_realtime_cpus);
+}
+
+static inline int xnsched_threading_cpu(int cpu)
+{
+	return cpumask_test_cpu(cpu, &cobalt_cpu_affinity);
+}
+
+#else /* !CONFIG_SMP */
+
+static inline void xnsched_set_resched(struct xnsched *sched)
+{
+	xnsched_set_self_resched(sched);
+}
+
+#define xnsched_realtime_cpus CPU_MASK_ALL
+
+static inline int xnsched_supported_cpu(int cpu)
+{
+	return 1;
+}
+
+static inline int xnsched_threading_cpu(int cpu)
+{
+	return 1;
+}
+
+#endif /* !CONFIG_SMP */
+
+#define for_each_realtime_cpu(cpu)		\
+	for_each_online_cpu(cpu)		\
+		if (xnsched_supported_cpu(cpu))	\
+
+int ___xnsched_run(struct xnsched *sched);
+
+void __xnsched_run_handler(void);
+
+static inline int __xnsched_run(struct xnsched *sched)
+{
+	/*
+	 * Reschedule if XNRESCHED is pending, but never over an IRQ
+	 * handler or in the middle of unlocked context switch.
+	 */
+	if (((sched->status|sched->lflags) &
+	     (XNINIRQ|XNINSW|XNRESCHED)) != XNRESCHED)
+		return 0;
+
+	return pipeline_schedule(sched);
+}
+
+static inline int xnsched_run(void)
+{
+	struct xnsched *sched = xnsched_current();
+	/*
+	 * sched->curr is shared locklessly with ___xnsched_run().
+	 * READ_ONCE() makes sure the compiler never uses load tearing
+	 * for reading this pointer piecemeal, so that multiple stores
+	 * occurring concurrently on remote CPUs never yield a
+	 * spurious merged value on the local one.
+	 */
+	struct xnthread *curr = READ_ONCE(sched->curr);
+
+	/*
+	 * If running over the root thread, hard irqs must be off
+	 * (asserted out of line in ___xnsched_run()).
+	 */
+	return curr->lock_count > 0 ? 0 : __xnsched_run(sched);
+}
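+
+/*
+ * Typical rescheduling pattern (schematic sketch): thread states are
+ * changed while holding the nklock, then a single xnsched_run() call
+ * triggers the rescheduling procedure if XNRESCHED was raised in the
+ * process.
+ *
+ *	xnlock_get_irqsave(&nklock, s);
+ *	...change thread states, e.g. xnthread_resume(thread, XNDELAY)...
+ *	xnsched_run();
+ *	xnlock_put_irqrestore(&nklock, s);
+ */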
+
+void xnsched_lock(void);
+
+void xnsched_unlock(void);
+
+static inline int xnsched_interrupt_p(void)
+{
+	return xnsched_current()->lflags & XNINIRQ;
+}
+
+static inline int xnsched_root_p(void)
+{
+	return xnthread_test_state(xnsched_current_thread(), XNROOT);
+}
+
+static inline int xnsched_unblockable_p(void)
+{
+	return xnsched_interrupt_p() || xnsched_root_p();
+}
+
+static inline int xnsched_primary_p(void)
+{
+	return !xnsched_unblockable_p();
+}
+
+bool xnsched_set_effective_priority(struct xnthread *thread,
+				    int prio);
+
+#include <cobalt/kernel/sched-idle.h>
+#include <cobalt/kernel/sched-rt.h>
+
+int xnsched_init_proc(void);
+
+void xnsched_cleanup_proc(void);
+
+void xnsched_register_classes(void);
+
+void xnsched_init_all(void);
+
+void xnsched_destroy_all(void);
+
+struct xnthread *xnsched_pick_next(struct xnsched *sched);
+
+void xnsched_putback(struct xnthread *thread);
+
+int xnsched_set_policy(struct xnthread *thread,
+		       struct xnsched_class *sched_class,
+		       const union xnsched_policy_param *p);
+
+void xnsched_track_policy(struct xnthread *thread,
+			  struct xnthread *target);
+
+void xnsched_protect_priority(struct xnthread *thread,
+			      int prio);
+
+void xnsched_migrate(struct xnthread *thread,
+		     struct xnsched *sched);
+
+void xnsched_migrate_passive(struct xnthread *thread,
+			     struct xnsched *sched);
+
+/**
+ * @fn void xnsched_rotate(struct xnsched *sched, struct xnsched_class *sched_class, const union xnsched_policy_param *sched_param)
+ * @brief Rotate a scheduler runqueue.
+ *
+ * The specified scheduling class is requested to rotate its runqueue
+ * for the given scheduler. Rotation is performed according to the
+ * scheduling parameter specified by @a sched_param.
+ *
+ * @note The nucleus supports round-robin scheduling for the members
+ * of the RT class.
+ *
+ * @param sched The per-CPU scheduler hosting the target scheduling
+ * class.
+ *
+ * @param sched_class The scheduling class which should rotate its
+ * runqueue.
+ *
+ * @param sched_param The scheduling parameter providing rotation
+ * information to the specified scheduling class.
+ *
+ * @coretags{unrestricted, atomic-entry}
+ */
+static inline void xnsched_rotate(struct xnsched *sched,
+				  struct xnsched_class *sched_class,
+				  const union xnsched_policy_param *sched_param)
+{
+	sched_class->sched_rotate(sched, sched_param);
+}
+
+static inline int xnsched_init_thread(struct xnthread *thread)
+{
+	int ret = 0;
+
+	xnsched_idle_init_thread(thread);
+	xnsched_rt_init_thread(thread);
+
+#ifdef CONFIG_XENO_OPT_SCHED_TP
+	ret = xnsched_tp_init_thread(thread);
+	if (ret)
+		return ret;
+#endif /* CONFIG_XENO_OPT_SCHED_TP */
+#ifdef CONFIG_XENO_OPT_SCHED_SPORADIC
+	ret = xnsched_sporadic_init_thread(thread);
+	if (ret)
+		return ret;
+#endif /* CONFIG_XENO_OPT_SCHED_SPORADIC */
+#ifdef CONFIG_XENO_OPT_SCHED_QUOTA
+	ret = xnsched_quota_init_thread(thread);
+	if (ret)
+		return ret;
+#endif /* CONFIG_XENO_OPT_SCHED_QUOTA */
+
+	return ret;
+}
+
+static inline int xnsched_root_priority(struct xnsched *sched)
+{
+	return sched->rootcb.cprio;
+}
+
+static inline struct xnsched_class *xnsched_root_class(struct xnsched *sched)
+{
+	return sched->rootcb.sched_class;
+}
+
+static inline void xnsched_tick(struct xnsched *sched)
+{
+	struct xnthread *curr = sched->curr;
+	struct xnsched_class *sched_class = curr->sched_class;
+	/*
+	 * A thread that undergoes round-robin scheduling only
+	 * consumes its time slice when it runs within its own
+	 * scheduling class, which excludes temporary PI boosts, and
+	 * does not hold the scheduler lock.
+	 */
+	if (sched_class == curr->base_class &&
+	    sched_class->sched_tick &&
+	    xnthread_test_state(curr, XNTHREAD_BLOCK_BITS|XNRRB) == XNRRB &&
+	    curr->lock_count == 0)
+		sched_class->sched_tick(sched);
+}
+
+static inline int xnsched_chkparam(struct xnsched_class *sched_class,
+				   struct xnthread *thread,
+				   const union xnsched_policy_param *p)
+{
+	if (sched_class->sched_chkparam)
+		return sched_class->sched_chkparam(thread, p);
+
+	return 0;
+}
+
+static inline int xnsched_declare(struct xnsched_class *sched_class,
+				  struct xnthread *thread,
+				  const union xnsched_policy_param *p)
+{
+	int ret;
+
+	if (sched_class->sched_declare) {
+		ret = sched_class->sched_declare(thread, p);
+		if (ret)
+			return ret;
+	}
+	if (sched_class != thread->base_class)
+		sched_class->nthreads++;
+
+	return 0;
+}
+
+static inline int xnsched_calc_wprio(struct xnsched_class *sched_class,
+				     int prio)
+{
+	return prio + sched_class->weight;
+}
+
+#ifdef CONFIG_XENO_OPT_SCHED_CLASSES
+
+static inline void xnsched_enqueue(struct xnthread *thread)
+{
+	struct xnsched_class *sched_class = thread->sched_class;
+
+	if (sched_class != &xnsched_class_idle)
+		sched_class->sched_enqueue(thread);
+}
+
+static inline void xnsched_dequeue(struct xnthread *thread)
+{
+	struct xnsched_class *sched_class = thread->sched_class;
+
+	if (sched_class != &xnsched_class_idle)
+		sched_class->sched_dequeue(thread);
+}
+
+static inline void xnsched_requeue(struct xnthread *thread)
+{
+	struct xnsched_class *sched_class = thread->sched_class;
+
+	if (sched_class != &xnsched_class_idle)
+		sched_class->sched_requeue(thread);
+}
+
+static inline
+bool xnsched_setparam(struct xnthread *thread,
+		      const union xnsched_policy_param *p)
+{
+	return thread->base_class->sched_setparam(thread, p);
+}
+
+static inline void xnsched_getparam(struct xnthread *thread,
+				    union xnsched_policy_param *p)
+{
+	thread->sched_class->sched_getparam(thread, p);
+}
+
+static inline void xnsched_trackprio(struct xnthread *thread,
+				     const union xnsched_policy_param *p)
+{
+	thread->sched_class->sched_trackprio(thread, p);
+	thread->wprio = xnsched_calc_wprio(thread->sched_class, thread->cprio);
+}
+
+static inline void xnsched_protectprio(struct xnthread *thread, int prio)
+{
+	thread->sched_class->sched_protectprio(thread, prio);
+	thread->wprio = xnsched_calc_wprio(thread->sched_class, thread->cprio);
+}
+
+static inline void xnsched_forget(struct xnthread *thread)
+{
+	struct xnsched_class *sched_class = thread->base_class;
+
+	--sched_class->nthreads;
+
+	if (sched_class->sched_forget)
+		sched_class->sched_forget(thread);
+}
+
+static inline void xnsched_kick(struct xnthread *thread)
+{
+	struct xnsched_class *sched_class = thread->base_class;
+
+	xnthread_set_info(thread, XNKICKED);
+
+	if (sched_class->sched_kick)
+		sched_class->sched_kick(thread);
+
+	xnsched_set_resched(thread->sched);
+}
+
+#else /* !CONFIG_XENO_OPT_SCHED_CLASSES */
+
+/*
+ * If only the RT and IDLE scheduling classes are compiled in, we can
+ * fully inline common helpers for dealing with those.
+ */
+
+static inline void xnsched_enqueue(struct xnthread *thread)
+{
+	struct xnsched_class *sched_class = thread->sched_class;
+
+	if (sched_class != &xnsched_class_idle)
+		__xnsched_rt_enqueue(thread);
+}
+
+static inline void xnsched_dequeue(struct xnthread *thread)
+{
+	struct xnsched_class *sched_class = thread->sched_class;
+
+	if (sched_class != &xnsched_class_idle)
+		__xnsched_rt_dequeue(thread);
+}
+
+static inline void xnsched_requeue(struct xnthread *thread)
+{
+	struct xnsched_class *sched_class = thread->sched_class;
+
+	if (sched_class != &xnsched_class_idle)
+		__xnsched_rt_requeue(thread);
+}
+
+static inline bool xnsched_setparam(struct xnthread *thread,
+				    const union xnsched_policy_param *p)
+{
+	struct xnsched_class *sched_class = thread->base_class;
+
+	if (sched_class == &xnsched_class_idle)
+		return __xnsched_idle_setparam(thread, p);
+
+	return __xnsched_rt_setparam(thread, p);
+}
+
+static inline void xnsched_getparam(struct xnthread *thread,
+				    union xnsched_policy_param *p)
+{
+	struct xnsched_class *sched_class = thread->sched_class;
+
+	if (sched_class == &xnsched_class_idle)
+		__xnsched_idle_getparam(thread, p);
+	else
+		__xnsched_rt_getparam(thread, p);
+}
+
+static inline void xnsched_trackprio(struct xnthread *thread,
+				     const union xnsched_policy_param *p)
+{
+	struct xnsched_class *sched_class = thread->sched_class;
+
+	if (sched_class == &xnsched_class_idle)
+		__xnsched_idle_trackprio(thread, p);
+	else
+		__xnsched_rt_trackprio(thread, p);
+
+	thread->wprio = xnsched_calc_wprio(sched_class, thread->cprio);
+}
+
+static inline void xnsched_protectprio(struct xnthread *thread, int prio)
+{
+	struct xnsched_class *sched_class = thread->sched_class;
+
+	if (sched_class == &xnsched_class_idle)
+		__xnsched_idle_protectprio(thread, prio);
+	else
+		__xnsched_rt_protectprio(thread, prio);
+
+	thread->wprio = xnsched_calc_wprio(sched_class, thread->cprio);
+}
+
+static inline void xnsched_forget(struct xnthread *thread)
+{
+	--thread->base_class->nthreads;
+	__xnsched_rt_forget(thread);
+}
+
+static inline void xnsched_kick(struct xnthread *thread)
+{
+	xnthread_set_info(thread, XNKICKED);
+	xnsched_set_resched(thread->sched);
+}
+
+#endif /* !CONFIG_XENO_OPT_SCHED_CLASSES */
+
+/** @} */
+
+#endif /* !_COBALT_KERNEL_SCHED_H */
+++ linux-patched/include/xenomai/cobalt/kernel/map.h	2022-03-21 12:58:31.713866299 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/include/xenomai/cobalt/kernel/sched-idle.h	1970-01-01 01:00:00.000000000 +0100
+/*
+ * Copyright (C) 2007 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * Xenomai is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published
+ * by the Free Software Foundation; either version 2 of the License,
+ * or (at your option) any later version.
+ *
+ * Xenomai is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Xenomai; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA
+ * 02111-1307, USA.
+ */
+#ifndef _COBALT_KERNEL_MAP_H
+#define _COBALT_KERNEL_MAP_H
+
+#include <asm/bitsperlong.h>
+
+/**
+ * @addtogroup cobalt_core_map
+ * @{
+ */
+
+#define XNMAP_MAX_KEYS	(BITS_PER_LONG * BITS_PER_LONG)
+
+struct xnmap {
+	int nkeys;
+	int ukeys;
+	int offset;
+	unsigned long himask;
+	unsigned long himap;
+#define __IDMAP_LONGS	((XNMAP_MAX_KEYS+BITS_PER_LONG-1)/BITS_PER_LONG)
+	unsigned long lomap[__IDMAP_LONGS];
+#undef __IDMAP_LONGS
+	void *objarray[1];
+};
+
+struct xnmap *xnmap_create(int nkeys,
+			   int reserve,
+			   int offset);
+
+void xnmap_delete(struct xnmap *map);
+
+int xnmap_enter(struct xnmap *map,
+		int key,
+		void *objaddr);
+
+int xnmap_remove(struct xnmap *map,
+		 int key);
+
+static inline void *xnmap_fetch_nocheck(struct xnmap *map, int key)
+{
+	int ofkey = key - map->offset;
+	return map->objarray[ofkey];
+}
+
+static inline void *xnmap_fetch(struct xnmap *map, int key)
+{
+	int ofkey = key - map->offset;
+
+	if (ofkey < 0 || ofkey >= map->nkeys)
+		return NULL;
+
+	return map->objarray[ofkey];
+}
+
+/** @} */
+
+#endif /* !_COBALT_KERNEL_MAP_H */
+++ linux-patched/include/xenomai/cobalt/kernel/sched-idle.h	2022-03-21 12:58:31.705866377 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/include/xenomai/cobalt/kernel/sched-weak.h	1970-01-01 01:00:00.000000000 +0100
+/*
+ * Copyright (C) 2008 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * Xenomai is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published
+ * by the Free Software Foundation; either version 2 of the License,
+ * or (at your option) any later version.
+ *
+ * Xenomai is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Xenomai; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA
+ * 02111-1307, USA.
+ */
+#ifndef _COBALT_KERNEL_SCHED_IDLE_H
+#define _COBALT_KERNEL_SCHED_IDLE_H
+
+#ifndef _COBALT_KERNEL_SCHED_H
+#error "please don't include cobalt/kernel/sched-idle.h directly"
+#endif
+
+/**
+ * @addtogroup cobalt_core_sched
+ * @{
+ */
+
+/* Idle priority level - actually never used for indexing. */
+#define XNSCHED_IDLE_PRIO	-1
+
+extern struct xnsched_class xnsched_class_idle;
+
+static inline bool __xnsched_idle_setparam(struct xnthread *thread,
+					   const union xnsched_policy_param *p)
+{
+	xnthread_clear_state(thread, XNWEAK);
+	return xnsched_set_effective_priority(thread, p->idle.prio);
+}
+
+static inline void __xnsched_idle_getparam(struct xnthread *thread,
+					   union xnsched_policy_param *p)
+{
+	p->idle.prio = thread->cprio;
+}
+
+static inline void __xnsched_idle_trackprio(struct xnthread *thread,
+					    const union xnsched_policy_param *p)
+{
+	if (p)
+		/* Inheriting a priority-less class makes no sense. */
+		XENO_WARN_ON_ONCE(COBALT, 1);
+	else
+		thread->cprio = XNSCHED_IDLE_PRIO;
+}
+
+static inline void __xnsched_idle_protectprio(struct xnthread *thread, int prio)
+{
+	XENO_WARN_ON_ONCE(COBALT, 1);
+}
+
+static inline int xnsched_idle_init_thread(struct xnthread *thread)
+{
+	return 0;
+}
+
+/** @} */
+
+#endif /* !_COBALT_KERNEL_SCHED_IDLE_H */
+++ linux-patched/include/xenomai/cobalt/kernel/sched-weak.h	2022-03-21 12:58:31.698866445 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/include/xenomai/cobalt/kernel/arith.h	1970-01-01 01:00:00.000000000 +0100
+/*
+ * Copyright (C) 2013 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * Xenomai is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published
+ * by the Free Software Foundation; either version 2 of the License,
+ * or (at your option) any later version.
+ *
+ * Xenomai is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Xenomai; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA
+ * 02111-1307, USA.
+ */
+#ifndef _COBALT_KERNEL_SCHED_WEAK_H
+#define _COBALT_KERNEL_SCHED_WEAK_H
+
+#ifndef _COBALT_KERNEL_SCHED_H
+#error "please don't include cobalt/kernel/sched-weak.h directly"
+#endif
+
+/**
+ * @addtogroup cobalt_core_sched
+ * @{
+ */
+
+#ifdef CONFIG_XENO_OPT_SCHED_WEAK
+
+#define XNSCHED_WEAK_MIN_PRIO	0
+#define XNSCHED_WEAK_MAX_PRIO	99
+#define XNSCHED_WEAK_NR_PRIO	\
+	(XNSCHED_WEAK_MAX_PRIO - XNSCHED_WEAK_MIN_PRIO + 1)
+
+#if XNSCHED_WEAK_NR_PRIO > XNSCHED_CLASS_WEIGHT_FACTOR ||	\
+	(defined(CONFIG_XENO_OPT_SCALABLE_SCHED) &&		\
+	 XNSCHED_WEAK_NR_PRIO > XNSCHED_MLQ_LEVELS)
+#error "WEAK class has too many priority levels"
+#endif
+
+extern struct xnsched_class xnsched_class_weak;
+
+struct xnsched_weak {
+	xnsched_queue_t runnable;	/*!< Runnable thread queue. */
+};
+
+static inline int xnsched_weak_init_thread(struct xnthread *thread)
+{
+	return 0;
+}
+
+#endif /* CONFIG_XENO_OPT_SCHED_WEAK */
+
+/** @} */
+
+#endif /* !_COBALT_KERNEL_SCHED_WEAK_H */
+++ linux-patched/include/xenomai/cobalt/kernel/arith.h	2022-03-21 12:58:31.691866513 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/include/xenomai/cobalt/kernel/sched-tp.h	1970-01-01 01:00:00.000000000 +0100
+/*
+ *   Generic arithmetic/conversion routines.
+ *   Copyright &copy; 2005 Stelian Pop.
+ *   Copyright &copy; 2005 Gilles Chanteperdrix.
+ *
+ *   Xenomai is free software; you can redistribute it and/or modify
+ *   it under the terms of the GNU General Public License as published by
+ *   the Free Software Foundation, Inc., 675 Mass Ave, Cambridge MA 02139,
+ *   USA; either version 2 of the License, or (at your option) any later
+ *   version.
+ *
+ *   This program is distributed in the hope that it will be useful,
+ *   but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *   GNU General Public License for more details.
+ *
+ *   You should have received a copy of the GNU General Public License
+ *   along with this program; if not, write to the Free Software
+ *   Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+#ifndef _COBALT_KERNEL_ARITH_H
+#define _COBALT_KERNEL_ARITH_H
+
+#include <asm/byteorder.h>
+#include <asm/div64.h>
+
+#ifdef __BIG_ENDIAN
+#define endianstruct { unsigned int _h; unsigned int _l; }
+#else /* __LITTLE_ENDIAN */
+#define endianstruct { unsigned int _l; unsigned int _h; }
+#endif
+
+#include <asm/xenomai/uapi/arith.h>
+
+#endif /* _COBALT_KERNEL_ARITH_H */
+++ linux-patched/include/xenomai/cobalt/kernel/sched-tp.h	2022-03-21 12:58:31.683866592 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/include/xenomai/cobalt/kernel/ppd.h	1970-01-01 01:00:00.000000000 +0100
+/*
+ * Copyright (C) 2008 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * Xenomai is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published
+ * by the Free Software Foundation; either version 2 of the License,
+ * or (at your option) any later version.
+ *
+ * Xenomai is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Xenomai; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA
+ * 02111-1307, USA.
+ */
+#ifndef _COBALT_KERNEL_SCHED_TP_H
+#define _COBALT_KERNEL_SCHED_TP_H
+
+#ifndef _COBALT_KERNEL_SCHED_H
+#error "please don't include cobalt/kernel/sched-tp.h directly"
+#endif
+
+/**
+ * @addtogroup cobalt_core_sched
+ * @{
+ */
+
+#ifdef CONFIG_XENO_OPT_SCHED_TP
+
+#define XNSCHED_TP_MIN_PRIO	1
+#define XNSCHED_TP_MAX_PRIO	255
+#define XNSCHED_TP_NR_PRIO	\
+	(XNSCHED_TP_MAX_PRIO - XNSCHED_TP_MIN_PRIO + 1)
+
+extern struct xnsched_class xnsched_class_tp;
+
+struct xnsched_tp_window {
+	xnticks_t w_offset;
+	int w_part;
+};
+
+struct xnsched_tp_schedule {
+	int pwin_nr;
+	xnticks_t tf_duration;
+	atomic_t refcount;
+	struct xnsched_tp_window pwins[0];
+};
+
+struct xnsched_tp {
+	struct xnsched_tpslot {
+		/** Per-partition runqueue. */
+		xnsched_queue_t runnable;
+	} partitions[CONFIG_XENO_OPT_SCHED_TP_NRPART];
+	/** Idle slot for passive windows. */
+	struct xnsched_tpslot idle;
+	/** Active partition slot */
+	struct xnsched_tpslot *tps;
+	/** Time frame timer */
+	struct xntimer tf_timer;
+	/** Global partition schedule */
+	struct xnsched_tp_schedule *gps;
+	/** Window index of next partition */
+	int wnext;
+	/** Start of next time frame */
+	xnticks_t tf_start;
+	/** Assigned thread queue */
+	struct list_head threads;
+};
+
+static inline int xnsched_tp_init_thread(struct xnthread *thread)
+{
+	thread->tps = NULL;
+
+	return 0;
+}
+
+struct xnsched_tp_schedule *
+xnsched_tp_set_schedule(struct xnsched *sched,
+			struct xnsched_tp_schedule *gps);
+
+void xnsched_tp_start_schedule(struct xnsched *sched);
+
+void xnsched_tp_stop_schedule(struct xnsched *sched);
+
+int xnsched_tp_get_partition(struct xnsched *sched);
+
+struct xnsched_tp_schedule *
+xnsched_tp_get_schedule(struct xnsched *sched);
+
+void xnsched_tp_put_schedule(struct xnsched_tp_schedule *gps);
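+
+/*
+ * A configuration sketch (hypothetical values; allocation, refcount
+ * handling and error checking omitted): a global schedule made of
+ * two 1-ms windows mapped to partitions 0 and 1 over a 2-ms time
+ * frame, installed then started on a per-CPU scheduler. Offsets and
+ * durations are given in nanoseconds here.
+ *
+ *	gps->pwin_nr = 2;
+ *	gps->tf_duration = 2000000;
+ *	gps->pwins[0].w_offset = 0;
+ *	gps->pwins[0].w_part = 0;
+ *	gps->pwins[1].w_offset = 1000000;
+ *	gps->pwins[1].w_part = 1;
+ *
+ *	old_gps = xnsched_tp_set_schedule(sched, gps);
+ *	xnsched_tp_start_schedule(sched);
+ */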
+
+#endif /* CONFIG_XENO_OPT_SCHED_TP */
+
+/** @} */
+
+#endif /* !_COBALT_KERNEL_SCHED_TP_H */
+++ linux-patched/include/xenomai/cobalt/kernel/ppd.h	2022-03-21 12:58:31.676866660 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/include/xenomai/cobalt/kernel/compat.h	1970-01-01 01:00:00.000000000 +0100
+/*
+ * Copyright &copy; 2006 Gilles Chanteperdrix <gch@xenomai.org>
+ *
+ * Xenomai is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, Inc., 675 Mass Ave, Cambridge MA 02139,
+ * USA; either version 2 of the License, or (at your option) any later
+ * version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+#ifndef _COBALT_KERNEL_PPD_H
+#define _COBALT_KERNEL_PPD_H
+
+#include <linux/types.h>
+#include <linux/atomic.h>
+#include <linux/rbtree.h>
+#include <cobalt/kernel/heap.h>
+
+struct cobalt_umm {
+	struct xnheap heap;
+	atomic_t refcount;
+	void (*release)(struct cobalt_umm *umm);
+};
+
+struct cobalt_ppd {
+	struct cobalt_umm umm;
+	atomic_t refcnt;
+	char *exe_path;
+	struct rb_root fds;
+};
+
+extern struct cobalt_ppd cobalt_kernel_ppd;
+
+#endif /* _COBALT_KERNEL_PPD_H */
+++ linux-patched/include/xenomai/cobalt/kernel/compat.h	2022-03-21 12:58:31.668866738 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/include/xenomai/cobalt/kernel/assert.h	1970-01-01 01:00:00.000000000 +0100
+/*
+ * Copyright (C) 2014 Philippe Gerum <rpm@xenomai.org>
+ *
+ * Xenomai is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * Xenomai is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+#ifndef _COBALT_KERNEL_COMPAT_H
+#define _COBALT_KERNEL_COMPAT_H
+
+#ifdef CONFIG_XENO_ARCH_SYS3264
+
+#include <linux/compat.h>
+#include <net/compat.h>
+#include <asm/xenomai/wrappers.h>
+#include <cobalt/uapi/sched.h>
+
+struct mq_attr;
+
+struct __compat_sched_ss_param {
+	int __sched_low_priority;
+	struct old_timespec32 __sched_repl_period;
+	struct old_timespec32 __sched_init_budget;
+	int __sched_max_repl;
+};
+
+struct __compat_sched_rr_param {
+	struct old_timespec32 __sched_rr_quantum;
+};
+
+struct compat_sched_param_ex {
+	int sched_priority;
+	union {
+		struct __compat_sched_ss_param ss;
+		struct __compat_sched_rr_param rr;
+		struct __sched_tp_param tp;
+		struct __sched_quota_param quota;
+	} sched_u;
+};
+
+struct compat_mq_attr {
+	compat_long_t mq_flags;
+	compat_long_t mq_maxmsg;
+	compat_long_t mq_msgsize;
+	compat_long_t mq_curmsgs;
+};
+
+struct compat_sched_tp_window {
+	struct old_timespec32 offset;
+	struct old_timespec32 duration;
+	int ptid;
+};
+
+struct __compat_sched_config_tp {
+	int op;
+	int nr_windows;
+	struct compat_sched_tp_window windows[0];
+};
+
+union compat_sched_config {
+	struct __compat_sched_config_tp tp;
+	struct __sched_config_quota quota;
+};
+
+#define compat_sched_tp_confsz(nr_win) \
+  (sizeof(struct __compat_sched_config_tp) + (nr_win) * sizeof(struct compat_sched_tp_window))
+
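+/*
+ * For instance (illustrative), the number of bytes to copy from a
+ * 32bit caller submitting a 3-window TP configuration would be:
+ *
+ *	size_t len = compat_sched_tp_confsz(3);
+ */
+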
+typedef struct {
+	compat_ulong_t fds_bits[__FD_SETSIZE / (8 * sizeof(compat_long_t))];
+} compat_fd_set;
+
+struct compat_rtdm_mmap_request {
+	u64 offset;
+	compat_size_t length;
+	int prot;
+	int flags;
+};
+
+int sys32_get_timespec(struct timespec64 *ts,
+		       const struct old_timespec32 __user *cts);
+
+int sys32_put_timespec(struct old_timespec32 __user *cts,
+		       const struct timespec64 *ts);
+
+int sys32_get_itimerspec(struct itimerspec64 *its,
+			 const struct old_itimerspec32 __user *cits);
+
+int sys32_put_itimerspec(struct old_itimerspec32 __user *cits,
+			 const struct itimerspec64 *its);
+
+int sys32_get_timeval(struct __kernel_old_timeval *tv,
+		      const struct old_timeval32 __user *ctv);
+
+int sys32_put_timeval(struct old_timeval32 __user *ctv,
+		      const struct __kernel_old_timeval *tv);
+
+int sys32_get_timex(struct __kernel_timex *tx,
+		    const struct old_timex32 __user *ctx);
+
+int sys32_put_timex(struct old_timex32 __user *ctx,
+		    const struct __kernel_timex *tx);
+
+int sys32_get_fdset(fd_set *fds, const compat_fd_set __user *cfds,
+		    size_t cfdsize);
+
+int sys32_put_fdset(compat_fd_set __user *cfds, const fd_set *fds,
+		    size_t fdsize);
+
+int sys32_get_param_ex(int policy,
+		       struct sched_param_ex *p,
+		       const struct compat_sched_param_ex __user *u_cp);
+
+int sys32_put_param_ex(int policy,
+		       struct compat_sched_param_ex __user *u_cp,
+		       const struct sched_param_ex *p);
+
+int sys32_get_mqattr(struct mq_attr *ap,
+		     const struct compat_mq_attr __user *u_cap);
+
+int sys32_put_mqattr(struct compat_mq_attr __user *u_cap,
+		     const struct mq_attr *ap);
+
+int sys32_get_sigevent(struct sigevent *ev,
+		       const struct compat_sigevent *__user u_cev);
+
+int sys32_get_sigset(sigset_t *set, const compat_sigset_t *u_cset);
+
+int sys32_put_sigset(compat_sigset_t *u_cset, const sigset_t *set);
+
+int sys32_get_sigval(union sigval *val, const union compat_sigval *u_cval);
+
+int sys32_put_siginfo(void __user *u_si, const struct siginfo *si,
+		      int overrun);
+
+int sys32_get_msghdr(struct user_msghdr *msg,
+		     const struct compat_msghdr __user *u_cmsg);
+
+int sys32_get_mmsghdr(struct mmsghdr *mmsg,
+		      const struct compat_mmsghdr __user *u_cmmsg);
+
+int sys32_put_msghdr(struct compat_msghdr __user *u_cmsg,
+		     const struct user_msghdr *msg);
+
+int sys32_put_mmsghdr(struct compat_mmsghdr __user *u_cmmsg,
+		     const struct mmsghdr *mmsg);
+
+int sys32_get_iovec(struct iovec *iov,
+		    const struct compat_iovec __user *ciov,
+		    int ciovlen);
+
+int sys32_put_iovec(struct compat_iovec __user *u_ciov,
+		    const struct iovec *iov,
+		    int iovlen);
+
+#endif /* CONFIG_XENO_ARCH_SYS3264 */
+
+#endif /* !_COBALT_KERNEL_COMPAT_H */
+++ linux-patched/include/xenomai/cobalt/kernel/assert.h	2022-03-21 12:58:31.661866806 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/include/xenomai/cobalt/kernel/timer.h	1970-01-01 01:00:00.000000000 +0100
+/*
+ * Copyright (C) 2006 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * Xenomai is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published
+ * by the Free Software Foundation; either version 2 of the License,
+ * or (at your option) any later version.
+ *
+ * Xenomai is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Xenomai; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA
+ * 02111-1307, USA.
+ */
+#ifndef _COBALT_KERNEL_ASSERT_H
+#define _COBALT_KERNEL_ASSERT_H
+
+#include <linux/kconfig.h>
+
+#define XENO_INFO	KERN_INFO    "[Xenomai] "
+#define XENO_WARNING	KERN_WARNING "[Xenomai] "
+#define XENO_ERR	KERN_ERR     "[Xenomai] "
+
+#define XENO_DEBUG(__subsys)				\
+	IS_ENABLED(CONFIG_XENO_OPT_DEBUG_##__subsys)
+#define XENO_ASSERT(__subsys, __cond)			\
+	(!WARN_ON(XENO_DEBUG(__subsys) && !(__cond)))
+#define XENO_BUG(__subsys)				\
+	BUG_ON(XENO_DEBUG(__subsys))
+#define XENO_BUG_ON(__subsys, __cond)			\
+	BUG_ON(XENO_DEBUG(__subsys) && (__cond))
+#define XENO_WARN(__subsys, __cond, __fmt...)		\
+	WARN(XENO_DEBUG(__subsys) && (__cond), __fmt)
+#define XENO_WARN_ON(__subsys, __cond)			\
+	WARN_ON(XENO_DEBUG(__subsys) && (__cond))
+#define XENO_WARN_ON_ONCE(__subsys, __cond)		\
+	WARN_ON_ONCE(XENO_DEBUG(__subsys) && (__cond))
+#ifdef CONFIG_SMP
+#define XENO_BUG_ON_SMP(__subsys, __cond)		\
+	XENO_BUG_ON(__subsys, __cond)
+#define XENO_WARN_ON_SMP(__subsys, __cond)		\
+	XENO_WARN_ON(__subsys, __cond)
+#define XENO_WARN_ON_ONCE_SMP(__subsys, __cond)		\
+	XENO_WARN_ON_ONCE(__subsys, __cond)
+#else
+#define XENO_BUG_ON_SMP(__subsys, __cond)		\
+	do { } while (0)
+#define XENO_WARN_ON_SMP(__subsys, __cond)		\
+	do { } while (0)
+#define XENO_WARN_ON_ONCE_SMP(__subsys, __cond)		\
+	do { } while (0)
+#endif
+
+#define TODO()    BUILD_BUG_ON(IS_ENABLED(CONFIG_XENO_TODO))
+
+#define primary_mode_only()	XENO_BUG_ON(CONTEXT, is_secondary_domain())
+#define secondary_mode_only()	XENO_BUG_ON(CONTEXT, !is_secondary_domain())
+#define interrupt_only()	XENO_BUG_ON(CONTEXT, !xnsched_interrupt_p())
+#define realtime_cpu_only()	XENO_BUG_ON(CONTEXT, !xnsched_supported_cpu(raw_smp_processor_id()))
+#define thread_only()		XENO_BUG_ON(CONTEXT, xnsched_interrupt_p())
+#define irqoff_only()		XENO_BUG_ON(CONTEXT, hard_irqs_disabled() == 0)
+#ifdef CONFIG_XENO_OPT_DEBUG_LOCKING
+#define atomic_only()		XENO_BUG_ON(CONTEXT, (xnlock_is_owner(&nklock) && hard_irqs_disabled()) == 0)
+#define preemptible_only()	XENO_BUG_ON(CONTEXT, xnlock_is_owner(&nklock) || hard_irqs_disabled())
+#else
+#define atomic_only()		XENO_BUG_ON(CONTEXT, hard_irqs_disabled() == 0)
+#define preemptible_only()	XENO_BUG_ON(CONTEXT, hard_irqs_disabled() != 0)
+#endif
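+
+/*
+ * Illustrative sketch (editor's addition): typical use of the checked
+ * assertions in a hypothetical service that must run in primary mode
+ * with the nucleus lock held and hard irqs off.
+ *
+ *	void my_service(struct xnthread *thread)
+ *	{
+ *		primary_mode_only();
+ *		atomic_only();
+ *		XENO_WARN_ON(COBALT, thread == NULL);
+ *		...
+ *	}
+ */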
+
+#endif /* !_COBALT_KERNEL_ASSERT_H */
+++ linux-patched/include/xenomai/cobalt/kernel/timer.h	2022-03-21 12:58:31.654866874 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/include/xenomai/cobalt/kernel/init.h	1970-01-01 01:00:00.000000000 +0100
+/*
+ * Copyright (C) 2001,2002,2003 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * Xenomai is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published
+ * by the Free Software Foundation; either version 2 of the License,
+ * or (at your option) any later version.
+ *
+ * Xenomai is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Xenomai; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA
+ * 02111-1307, USA.
+ */
+
+#ifndef _COBALT_KERNEL_TIMER_H
+#define _COBALT_KERNEL_TIMER_H
+
+#include <cobalt/kernel/clock.h>
+#include <cobalt/kernel/stat.h>
+#include <cobalt/kernel/list.h>
+#include <cobalt/kernel/assert.h>
+#include <cobalt/kernel/ancillaries.h>
+#include <asm/xenomai/wrappers.h>
+
+/**
+ * @addtogroup cobalt_core_timer
+ * @{
+ */
+#define XN_INFINITE   ((xnticks_t)0)
+#define XN_NONBLOCK   ((xnticks_t)-1)
+
+/* Timer modes */
+typedef enum xntmode {
+	XN_RELATIVE,
+	XN_ABSOLUTE,
+	XN_REALTIME
+} xntmode_t;
+
+/* Timer status */
+#define XNTIMER_DEQUEUED  0x00000001
+#define XNTIMER_KILLED    0x00000002
+#define XNTIMER_PERIODIC  0x00000004
+#define XNTIMER_REALTIME  0x00000008
+#define XNTIMER_FIRED     0x00000010
+#define XNTIMER_RUNNING   0x00000020
+#define XNTIMER_KGRAVITY  0x00000040
+#define XNTIMER_UGRAVITY  0x00000080
+#define XNTIMER_IGRAVITY  0	     /* most conservative */
+
+#define XNTIMER_GRAVITY_MASK	(XNTIMER_KGRAVITY|XNTIMER_UGRAVITY)
+#define XNTIMER_INIT_MASK	XNTIMER_GRAVITY_MASK
+
+/* These flags are available to the real-time interfaces */
+#define XNTIMER_SPARE0  0x01000000
+#define XNTIMER_SPARE1  0x02000000
+#define XNTIMER_SPARE2  0x04000000
+#define XNTIMER_SPARE3  0x08000000
+#define XNTIMER_SPARE4  0x10000000
+#define XNTIMER_SPARE5  0x20000000
+#define XNTIMER_SPARE6  0x40000000
+#define XNTIMER_SPARE7  0x80000000
+
+/* Timer priorities */
+#define XNTIMER_LOPRIO  (-999999999)
+#define XNTIMER_STDPRIO 0
+#define XNTIMER_HIPRIO  999999999
+
+struct xntlholder {
+	struct list_head link;
+	xnticks_t key;
+	int prio;
+};
+
+#define xntlholder_date(h)	((h)->key)
+#define xntlholder_prio(h)	((h)->prio)
+#define xntlist_init(q)		INIT_LIST_HEAD(q)
+#define xntlist_empty(q)	list_empty(q)
+
+static inline struct xntlholder *xntlist_head(struct list_head *q)
+{
+	if (list_empty(q))
+		return NULL;
+
+	return list_first_entry(q, struct xntlholder, link);
+}
+
+static inline struct xntlholder *xntlist_next(struct list_head *q,
+					      struct xntlholder *h)
+{
+	if (list_is_last(&h->link, q))
+		return NULL;
+
+	return list_entry(h->link.next, struct xntlholder, link);
+}
+
+static inline struct xntlholder *xntlist_second(struct list_head *q,
+	struct xntlholder *h)
+{
+	return xntlist_next(q, h);
+}
+
+static inline void xntlist_insert(struct list_head *q, struct xntlholder *holder)
+{
+	struct xntlholder *p;
+
+	if (list_empty(q)) {
+		list_add(&holder->link, q);
+		return;
+	}
+
+	/*
+	 * Insert the new timer at the proper place in the single
+	 * queue. O(N) here, but this is the price for the increased
+	 * flexibility...
+	 */
+	list_for_each_entry_reverse(p, q, link) {
+		if ((xnsticks_t) (holder->key - p->key) > 0 ||
+		    (holder->key == p->key && holder->prio <= p->prio))
+		  break;
+	}
+
+	list_add(&holder->link, &p->link);
+}
+
+#define xntlist_remove(q, h)			\
+	do {					\
+		(void)(q);			\
+		list_del(&(h)->link);		\
+	} while (0)
+
+#if defined(CONFIG_XENO_OPT_TIMER_RBTREE)
+
+#include <linux/rbtree.h>
+
+typedef struct {
+	unsigned long long date;
+	unsigned prio;
+	struct rb_node link;
+} xntimerh_t;
+
+#define xntimerh_date(h) ((h)->date)
+#define xntimerh_prio(h) ((h)->prio)
+#define xntimerh_init(h) do { } while (0)
+
+typedef struct {
+	struct rb_root root;
+	xntimerh_t *head;
+} xntimerq_t;
+
+#define xntimerq_init(q)			\
+	({					\
+		xntimerq_t *_q = (q);		\
+		_q->root = RB_ROOT;		\
+		_q->head = NULL;		\
+	})
+
+#define xntimerq_destroy(q) do { } while (0)
+#define xntimerq_empty(q) ((q)->head == NULL)
+
+#define xntimerq_head(q) ((q)->head)
+
+#define xntimerq_next(q, h)						\
+	({								\
+		struct rb_node *_node = rb_next(&(h)->link);		\
+		_node ? (container_of(_node, xntimerh_t, link)) : NULL; \
+	})
+
+#define xntimerq_second(q, h) xntimerq_next(q, h)
+
+void xntimerq_insert(xntimerq_t *q, xntimerh_t *holder);
+
+static inline void xntimerq_remove(xntimerq_t *q, xntimerh_t *holder)
+{
+	if (holder == q->head)
+		q->head = xntimerq_second(q, holder);
+
+	rb_erase(&holder->link, &q->root);
+}
+
+typedef struct { } xntimerq_it_t;
+
+#define xntimerq_it_begin(q,i)	((void) (i), xntimerq_head(q))
+#define xntimerq_it_next(q,i,h) ((void) (i), xntimerq_next((q),(h)))
+
+#else /* CONFIG_XENO_OPT_TIMER_LIST */
+
+typedef struct xntlholder xntimerh_t;
+
+#define xntimerh_date(h)       xntlholder_date(h)
+#define xntimerh_prio(h)       xntlholder_prio(h)
+#define xntimerh_init(h)       do { } while (0)
+
+typedef struct list_head xntimerq_t;
+
+#define xntimerq_init(q)        xntlist_init(q)
+#define xntimerq_destroy(q)     do { } while (0)
+#define xntimerq_empty(q)       xntlist_empty(q)
+#define xntimerq_head(q)        xntlist_head(q)
+#define xntimerq_second(q, h)   xntlist_second((q),(h))
+#define xntimerq_insert(q, h)   xntlist_insert((q),(h))
+#define xntimerq_remove(q, h)   xntlist_remove((q),(h))
+
+typedef struct { } xntimerq_it_t;
+
+#define xntimerq_it_begin(q,i)  ((void) (i), xntlist_head(q))
+#define xntimerq_it_next(q,i,h) ((void) (i), xntlist_next((q),(h)))
+
+#endif /* CONFIG_XENO_OPT_TIMER_LIST */
+
+struct xnsched;
+
+struct xntimerdata {
+	xntimerq_t q;
+};
+
+static inline struct xntimerdata *
+xnclock_percpu_timerdata(struct xnclock *clock, int cpu)
+{
+	return per_cpu_ptr(clock->timerdata, cpu);
+}
+
+static inline struct xntimerdata *
+xnclock_this_timerdata(struct xnclock *clock)
+{
+	return raw_cpu_ptr(clock->timerdata);
+}
+
+struct xntimer {
+#ifdef CONFIG_XENO_OPT_EXTCLOCK
+	struct xnclock *clock;
+#endif
+	/** Link in timers list. */
+	xntimerh_t aplink;
+	struct list_head adjlink;
+	/** Timer status. */
+	unsigned long status;
+	/** Periodic interval (clock ticks, 0 == one shot). */
+	xnticks_t interval;
+	/** Periodic interval (nanoseconds, 0 == one shot). */
+	xnticks_t interval_ns;
+	/** Count of timer ticks in periodic mode. */
+	xnticks_t periodic_ticks;
+	/** First tick date in periodic mode. */
+	xnticks_t start_date;
+	/** Date of next periodic release point (timer ticks). */
+	xnticks_t pexpect_ticks;
+	/** Sched structure to which the timer is attached. */
+	struct xnsched *sched;
+	/** Timeout handler. */
+	void (*handler)(struct xntimer *timer);
+#ifdef CONFIG_XENO_OPT_STATS
+#ifdef CONFIG_XENO_OPT_EXTCLOCK
+	struct xnclock *tracker;
+#endif
+	/** Timer name to be displayed. */
+	char name[XNOBJECT_NAME_LEN];
+	/** Timer holder in timebase. */
+	struct list_head next_stat;
+	/** Number of timer schedules. */
+	xnstat_counter_t scheduled;
+	/** Number of timer events. */
+	xnstat_counter_t fired;
+#endif /* CONFIG_XENO_OPT_STATS */
+};
+
+#ifdef CONFIG_XENO_OPT_EXTCLOCK
+
+static inline struct xnclock *xntimer_clock(struct xntimer *timer)
+{
+	return timer->clock;
+}
+
+void xntimer_set_clock(struct xntimer *timer,
+		       struct xnclock *newclock);
+
+#else /* !CONFIG_XENO_OPT_EXTCLOCK */
+
+static inline struct xnclock *xntimer_clock(struct xntimer *timer)
+{
+	return &nkclock;
+}
+
+static inline void xntimer_set_clock(struct xntimer *timer,
+				     struct xnclock *newclock)
+{
+	XENO_BUG_ON(COBALT, newclock != &nkclock);
+}
+
+#endif /* !CONFIG_XENO_OPT_EXTCLOCK */
+
+#ifdef CONFIG_SMP
+static inline struct xnsched *xntimer_sched(struct xntimer *timer)
+{
+	return timer->sched;
+}
+#else /* !CONFIG_SMP */
+#define xntimer_sched(t)	xnsched_current()
+#endif /* !CONFIG_SMP */
+
+#define xntimer_percpu_queue(__timer)					\
+	({								\
+		struct xntimerdata *tmd;				\
+		int cpu = xnsched_cpu((__timer)->sched);		\
+		tmd = xnclock_percpu_timerdata(xntimer_clock(__timer), cpu); \
+		&tmd->q;						\
+	})
+
+static inline unsigned long xntimer_gravity(struct xntimer *timer)
+{
+	struct xnclock *clock = xntimer_clock(timer);
+
+	if (timer->status & XNTIMER_KGRAVITY)
+		return clock->gravity.kernel;
+
+	if (timer->status & XNTIMER_UGRAVITY)
+		return clock->gravity.user;
+
+	return clock->gravity.irq;
+}
+
+static inline void xntimer_update_date(struct xntimer *timer)
+{
+	xntimerh_date(&timer->aplink) = timer->start_date
+		+ xnclock_ns_to_ticks(xntimer_clock(timer),
+			timer->periodic_ticks * timer->interval_ns)
+		- xntimer_gravity(timer);
+}
+
+static inline xnticks_t xntimer_pexpect(struct xntimer *timer)
+{
+	return timer->start_date +
+		xnclock_ns_to_ticks(xntimer_clock(timer),
+				timer->pexpect_ticks * timer->interval_ns);
+}
+
+static inline void xntimer_set_priority(struct xntimer *timer,
+					int prio)
+{
+	xntimerh_prio(&timer->aplink) = prio;
+}
+
+static inline int xntimer_active_p(struct xntimer *timer)
+{
+	return timer->sched != NULL;
+}
+
+static inline int xntimer_running_p(struct xntimer *timer)
+{
+	return (timer->status & XNTIMER_RUNNING) != 0;
+}
+
+static inline int xntimer_fired_p(struct xntimer *timer)
+{
+	return (timer->status & XNTIMER_FIRED) != 0;
+}
+
+static inline int xntimer_periodic_p(struct xntimer *timer)
+{
+	return (timer->status & XNTIMER_PERIODIC) != 0;
+}
+
+void __xntimer_init(struct xntimer *timer,
+		    struct xnclock *clock,
+		    void (*handler)(struct xntimer *timer),
+		    struct xnsched *sched,
+		    int flags);
+
+void xntimer_set_gravity(struct xntimer *timer,
+			 int gravity);
+
+#ifdef CONFIG_XENO_OPT_STATS
+
+#define xntimer_init(__timer, __clock, __handler, __sched, __flags)	\
+do {									\
+	__xntimer_init(__timer, __clock, __handler, __sched, __flags);	\
+	xntimer_set_name(__timer, #__handler);				\
+} while (0)
+
+static inline void xntimer_reset_stats(struct xntimer *timer)
+{
+	xnstat_counter_set(&timer->scheduled, 0);
+	xnstat_counter_set(&timer->fired, 0);
+}
+
+static inline void xntimer_account_scheduled(struct xntimer *timer)
+{
+	xnstat_counter_inc(&timer->scheduled);
+}
+
+static inline void xntimer_account_fired(struct xntimer *timer)
+{
+	xnstat_counter_inc(&timer->fired);
+}
+
+static inline void xntimer_set_name(struct xntimer *timer, const char *name)
+{
+	knamecpy(timer->name, name);
+}
+
+#else /* !CONFIG_XENO_OPT_STATS */
+
+#define xntimer_init	__xntimer_init
+
+static inline void xntimer_reset_stats(struct xntimer *timer) { }
+
+static inline void xntimer_account_scheduled(struct xntimer *timer) { }
+
+static inline void xntimer_account_fired(struct xntimer *timer) { }
+
+static inline void xntimer_set_name(struct xntimer *timer, const char *name) { }
+
+#endif /* !CONFIG_XENO_OPT_STATS */
+
+#if defined(CONFIG_XENO_OPT_EXTCLOCK) && defined(CONFIG_XENO_OPT_STATS)
+void xntimer_switch_tracking(struct xntimer *timer,
+			     struct xnclock *newclock);
+#else
+static inline
+void xntimer_switch_tracking(struct xntimer *timer,
+			     struct xnclock *newclock) { }
+#endif
+
+void xntimer_destroy(struct xntimer *timer);
+
+/**
+ * @fn xnticks_t xntimer_interval(struct xntimer *timer)
+ *
+ * @brief Return the timer interval value.
+ *
+ * Return the timer interval value in nanoseconds.
+ *
+ * @param timer The address of a valid timer descriptor.
+ *
+ * @return The duration of a period in nanoseconds. The special value
+ * XN_INFINITE is returned if @a timer is currently disabled or
+ * one shot.
+ *
+ * @coretags{unrestricted, atomic-entry}
+ */
+static inline xnticks_t xntimer_interval(struct xntimer *timer)
+{
+	return timer->interval_ns;
+}
+
+static inline xnticks_t xntimer_expiry(struct xntimer *timer)
+{
+	/* Real expiry date in ticks without anticipation (no gravity) */
+	return xntimerh_date(&timer->aplink) + xntimer_gravity(timer);
+}
+
+int xntimer_start(struct xntimer *timer,
+		xnticks_t value,
+		xnticks_t interval,
+		xntmode_t mode);
+
+void __xntimer_stop(struct xntimer *timer);
+
+xnticks_t xntimer_get_date(struct xntimer *timer);
+
+xnticks_t __xntimer_get_timeout(struct xntimer *timer);
+
+xnticks_t xntimer_get_interval(struct xntimer *timer);
+
+int xntimer_heading_p(struct xntimer *timer);
+
+static inline void xntimer_stop(struct xntimer *timer)
+{
+	if (timer->status & XNTIMER_RUNNING)
+		__xntimer_stop(timer);
+}
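+
+/*
+ * Illustrative sketch (editor's addition): arming a 1 ms periodic
+ * timer on the core clock; my_* names are placeholders and error
+ * handling is elided.
+ *
+ *	static struct xntimer my_timer;
+ *
+ *	static void my_handler(struct xntimer *timer)
+ *	{
+ *		// runs on each tick, from timer handling context
+ *	}
+ *
+ *	xntimer_init(&my_timer, &nkclock, my_handler, NULL, XNTIMER_IGRAVITY);
+ *	xntimer_start(&my_timer, 1000000, 1000000, XN_RELATIVE);
+ *	...
+ *	xntimer_stop(&my_timer);
+ *	xntimer_destroy(&my_timer);
+ */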
+
+static inline xnticks_t xntimer_get_timeout(struct xntimer *timer)
+{
+	if (!xntimer_running_p(timer))
+		return XN_INFINITE;
+
+	return __xntimer_get_timeout(timer);
+}
+
+static inline xnticks_t xntimer_get_timeout_stopped(struct xntimer *timer)
+{
+	return __xntimer_get_timeout(timer);
+}
+
+static inline void xntimer_enqueue(struct xntimer *timer,
+				   xntimerq_t *q)
+{
+	xntimerq_insert(q, &timer->aplink);
+	timer->status &= ~XNTIMER_DEQUEUED;
+	xntimer_account_scheduled(timer);
+}
+
+static inline void xntimer_dequeue(struct xntimer *timer,
+				   xntimerq_t *q)
+{
+	xntimerq_remove(q, &timer->aplink);
+	timer->status |= XNTIMER_DEQUEUED;
+}
+
+unsigned long long xntimer_get_overruns(struct xntimer *timer,
+					struct xnthread *waiter,
+					xnticks_t now);
+
+#ifdef CONFIG_SMP
+
+void __xntimer_migrate(struct xntimer *timer, struct xnsched *sched);
+
+static inline
+void xntimer_migrate(struct xntimer *timer, struct xnsched *sched)
+{				/* nklocked, IRQs off */
+	if (timer->sched != sched)
+		__xntimer_migrate(timer, sched);
+}
+
+void __xntimer_set_affinity(struct xntimer *timer,
+			    struct xnsched *sched);
+
+static inline void xntimer_set_affinity(struct xntimer *timer,
+					struct xnsched *sched)
+{
+	if (sched != xntimer_sched(timer))
+		__xntimer_set_affinity(timer, sched);
+}
+
+#else /* ! CONFIG_SMP */
+
+static inline void xntimer_migrate(struct xntimer *timer,
+				   struct xnsched *sched)
+{
+	timer->sched = sched;
+}
+
+static inline void xntimer_set_affinity(struct xntimer *timer,
+					struct xnsched *sched)
+{
+	xntimer_migrate(timer, sched);
+}
+
+#endif /* CONFIG_SMP */
+
+char *xntimer_format_time(xnticks_t ns,
+			  char *buf, size_t bufsz);
+
+/** @} */
+
+#endif /* !_COBALT_KERNEL_TIMER_H */
+++ linux-patched/include/xenomai/cobalt/kernel/init.h	2022-03-21 12:58:31.646866952 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/include/xenomai/cobalt/kernel/registry.h	1970-01-01 01:00:00.000000000 +0100
+/*
+ * Copyright (C) 2013 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * Xenomai is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published
+ * by the Free Software Foundation; either version 2 of the License,
+ * or (at your option) any later version.
+ *
+ * Xenomai is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Xenomai; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA
+ * 02111-1307, USA.
+ */
+#ifndef _COBALT_KERNEL_INIT_H
+#define _COBALT_KERNEL_INIT_H
+
+#include <linux/atomic.h>
+#include <linux/notifier.h>
+#include <cobalt/uapi/corectl.h>
+
+extern atomic_t cobalt_runstate;
+
+static inline enum cobalt_run_states realtime_core_state(void)
+{
+	return atomic_read(&cobalt_runstate);
+}
+
+static inline int realtime_core_enabled(void)
+{
+	return atomic_read(&cobalt_runstate) != COBALT_STATE_DISABLED;
+}
+
+static inline int realtime_core_running(void)
+{
+	return atomic_read(&cobalt_runstate) == COBALT_STATE_RUNNING;
+}
+
+static inline void set_realtime_core_state(enum cobalt_run_states state)
+{
+	atomic_set(&cobalt_runstate, state);
+}
+
+void cobalt_add_state_chain(struct notifier_block *nb);
+
+void cobalt_remove_state_chain(struct notifier_block *nb);
+
+void cobalt_call_state_chain(enum cobalt_run_states newstate);
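+
+/*
+ * Illustrative sketch (editor's addition): a hypothetical driver
+ * tracking Cobalt run state transitions through the notifier chain.
+ *
+ *	static int my_state_handler(struct notifier_block *nb,
+ *				    unsigned long state, void *unused)
+ *	{
+ *		if (state == COBALT_STATE_STOPPED)
+ *			my_quiesce();	// placeholder
+ *		return NOTIFY_DONE;
+ *	}
+ *
+ *	static struct notifier_block my_nb = {
+ *		.notifier_call = my_state_handler,
+ *	};
+ *
+ *	cobalt_add_state_chain(&my_nb);
+ *	...
+ *	cobalt_remove_state_chain(&my_nb);
+ */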
+
+#endif /* !_COBALT_KERNEL_INIT_H */
+++ linux-patched/include/xenomai/cobalt/kernel/registry.h	2022-03-21 12:58:31.639867021 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/include/xenomai/cobalt/kernel/sched-rt.h	1970-01-01 01:00:00.000000000 +0100
+/*
+ * Copyright (C) 2004 Philippe Gerum <rpm@xenomai.org>
+ *
+ * Xenomai is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+#ifndef _COBALT_KERNEL_REGISTRY_H
+#define _COBALT_KERNEL_REGISTRY_H
+
+#include <cobalt/kernel/list.h>
+#include <cobalt/kernel/synch.h>
+#include <cobalt/kernel/vfile.h>
+
+/**
+ * @addtogroup cobalt_core_registry
+ *
+ * @{
+ */
+struct xnpnode;
+
+struct xnobject {
+	void *objaddr;
+	const char *key;	  /* !< Hash key. May be NULL if anonymous. */
+	unsigned long cstamp;		  /* !< Creation stamp. */
+#ifdef CONFIG_XENO_OPT_VFILE
+	struct xnpnode *pnode;	/* !< v-file information class. */
+	union {
+		struct {
+			struct xnvfile_rev_tag tag;
+			struct xnvfile_snapshot file;
+		} vfsnap; /* !< virtual snapshot file. */
+		struct xnvfile_regular vfreg; /* !< virtual regular file */
+		struct xnvfile_link link;     /* !< virtual link. */
+	} vfile_u;
+	struct xnvfile *vfilp;
+#endif /* CONFIG_XENO_OPT_VFILE */
+	struct hlist_node hlink; /* !< Link in h-table */
+	struct list_head link;
+};
+
+int xnregistry_init(void);
+
+void xnregistry_cleanup(void);
+
+#ifdef CONFIG_XENO_OPT_VFILE
+
+#define XNOBJECT_EXPORT_SCHEDULED  ((struct xnvfile *)1L)
+#define XNOBJECT_EXPORT_INPROGRESS ((struct xnvfile *)2L)
+#define XNOBJECT_EXPORT_ABORTED    ((struct xnvfile *)3L)
+
+struct xnptree {
+	const char *dirname;
+	/* hidden */
+	int entries;
+	struct xnvfile_directory vdir;
+};
+
+#define DEFINE_XNPTREE(__var, __name)		\
+	struct xnptree __var = {		\
+		.dirname = __name,		\
+		.entries = 0,			\
+		.vdir = xnvfile_nodir,		\
+	}
+
+struct xnpnode_ops {
+	int (*export)(struct xnobject *object, struct xnpnode *pnode);
+	void (*unexport)(struct xnobject *object, struct xnpnode *pnode);
+	void (*touch)(struct xnobject *object);
+};
+
+struct xnpnode {
+	const char *dirname;
+	struct xnptree *root;
+	struct xnpnode_ops *ops;
+	/* hidden */
+	int entries;
+	struct xnvfile_directory vdir;
+};
+
+struct xnpnode_snapshot {
+	struct xnpnode node;
+	struct xnvfile_snapshot_template vfile;
+};
+
+struct xnpnode_regular {
+	struct xnpnode node;
+	struct xnvfile_regular_template vfile;
+};
+
+struct xnpnode_link {
+	struct xnpnode node;
+	char *(*target)(void *obj);
+};
+
+#else /* !CONFIG_XENO_OPT_VFILE */
+
+#define DEFINE_XNPTREE(__var, __name);
+
+/* Placeholders. */
+
+struct xnpnode {
+	const char *dirname;
+};
+
+struct xnpnode_snapshot {
+	struct xnpnode node;
+};
+
+struct xnpnode_regular {
+	struct xnpnode node;
+};
+
+struct xnpnode_link {
+	struct xnpnode node;
+};
+
+#endif /* !CONFIG_XENO_OPT_VFILE */
+
+/* Public interface. */
+
+extern struct xnobject *registry_obj_slots;
+
+static inline struct xnobject *xnregistry_validate(xnhandle_t handle)
+{
+	struct xnobject *object;
+	/*
+	 * Careful: a removed object which is still in flight to be
+	 * unexported carries a NULL objaddr, so we have to check this
+	 * as well.
+	 */
+	handle = xnhandle_get_index(handle);
+	if (likely(handle && handle < CONFIG_XENO_OPT_REGISTRY_NRSLOTS)) {
+		object = &registry_obj_slots[handle];
+		return object->objaddr ? object : NULL;
+	}
+
+	return NULL;
+}
+
+static inline const char *xnregistry_key(xnhandle_t handle)
+{
+	struct xnobject *object = xnregistry_validate(handle);
+	return object ? object->key : NULL;
+}
+
+int xnregistry_enter(const char *key,
+		     void *objaddr,
+		     xnhandle_t *phandle,
+		     struct xnpnode *pnode);
+
+static inline int
+xnregistry_enter_anon(void *objaddr, xnhandle_t *phandle)
+{
+	return xnregistry_enter(NULL, objaddr, phandle, NULL);
+}
+
+int xnregistry_bind(const char *key,
+		    xnticks_t timeout,
+		    int timeout_mode,
+		    xnhandle_t *phandle);
+
+int xnregistry_remove(xnhandle_t handle);
+
+static inline
+void *xnregistry_lookup(xnhandle_t handle,
+			unsigned long *cstamp_r)
+{
+	struct xnobject *object = xnregistry_validate(handle);
+
+	if (object == NULL)
+		return NULL;
+
+	if (cstamp_r)
+		*cstamp_r = object->cstamp;
+
+	return object->objaddr;
+}
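+
+/*
+ * Illustrative sketch (editor's addition): exporting an object to the
+ * registry, then resolving its handle back to an address. The key and
+ * object type are placeholders; no vfile export is requested (NULL
+ * pnode).
+ *
+ *	struct my_object *obj;
+ *	xnhandle_t handle;
+ *	int ret;
+ *
+ *	ret = xnregistry_enter("my-object", obj, &handle, NULL);
+ *	if (ret)
+ *		return ret;
+ *
+ *	obj = xnregistry_lookup(handle, NULL);	// NULL if removed meanwhile
+ *	...
+ *	xnregistry_remove(handle);
+ */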
+
+int xnregistry_unlink(const char *key);
+
+unsigned xnregistry_hash_size(void);
+
+extern struct xnpnode_ops xnregistry_vfsnap_ops;
+
+extern struct xnpnode_ops xnregistry_vlink_ops;
+
+extern struct xnpnode_ops xnregistry_vfreg_ops;
+
+/** @} */
+
+#endif /* !_COBALT_KERNEL_REGISTRY_H */
+++ linux-patched/include/xenomai/cobalt/kernel/sched-rt.h	2022-03-21 12:58:31.631867099 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/include/xenomai/cobalt/kernel/time.h	1970-01-01 01:00:00.000000000 +0100
+/*
+ * Copyright (C) 2008 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * Xenomai is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published
+ * by the Free Software Foundation; either version 2 of the License,
+ * or (at your option) any later version.
+ *
+ * Xenomai is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Xenomai; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA
+ * 02111-1307, USA.
+ */
+#ifndef _COBALT_KERNEL_SCHED_RT_H
+#define _COBALT_KERNEL_SCHED_RT_H
+
+#ifndef _COBALT_KERNEL_SCHED_H
+#error "please don't include cobalt/kernel/sched-rt.h directly"
+#endif
+
+/**
+ * @addtogroup cobalt_core_sched
+ * @{
+ */
+
+/*
+ * Global priority scale for Xenomai's core scheduling class,
+ * available to SCHED_COBALT members.
+ */
+#define XNSCHED_CORE_MIN_PRIO	0
+#define XNSCHED_CORE_MAX_PRIO	259
+#define XNSCHED_CORE_NR_PRIO	\
+	(XNSCHED_CORE_MAX_PRIO - XNSCHED_CORE_MIN_PRIO + 1)
+
+/*
+ * Priority range for SCHED_FIFO, and all other classes Cobalt
+ * implements except SCHED_COBALT.
+ */
+#define XNSCHED_FIFO_MIN_PRIO	1
+#define XNSCHED_FIFO_MAX_PRIO	256
+
+#if XNSCHED_CORE_NR_PRIO > XNSCHED_CLASS_WEIGHT_FACTOR ||	\
+  (defined(CONFIG_XENO_OPT_SCALABLE_SCHED) &&			\
+   XNSCHED_CORE_NR_PRIO > XNSCHED_MLQ_LEVELS)
+#error "XNSCHED_MLQ_LEVELS is too low"
+#endif
+
+extern struct xnsched_class xnsched_class_rt;
+
+static inline void __xnsched_rt_requeue(struct xnthread *thread)
+{
+	xnsched_addq(&thread->sched->rt.runnable, thread);
+}
+
+static inline void __xnsched_rt_enqueue(struct xnthread *thread)
+{
+	xnsched_addq_tail(&thread->sched->rt.runnable, thread);
+}
+
+static inline void __xnsched_rt_dequeue(struct xnthread *thread)
+{
+	xnsched_delq(&thread->sched->rt.runnable, thread);
+}
+
+static inline void __xnsched_rt_track_weakness(struct xnthread *thread)
+{
+	/*
+	 * We have to track threads exiting weak scheduling, i.e. any
+	 * thread leaving the WEAK class code if compiled in, or
+	 * assigned a zero priority if weak threads are hosted by the
+	 * RT class.
+	 *
+	 * CAUTION: since we need to check the effective priority
+	 * level for determining the weakness state, this can only
+	 * apply to non-boosted threads.
+	 */
+	if (IS_ENABLED(CONFIG_XENO_OPT_SCHED_WEAK) || thread->cprio)
+		xnthread_clear_state(thread, XNWEAK);
+	else
+		xnthread_set_state(thread, XNWEAK);
+}
+
+static inline bool __xnsched_rt_setparam(struct xnthread *thread,
+					 const union xnsched_policy_param *p)
+{
+	bool ret = xnsched_set_effective_priority(thread, p->rt.prio);
+	
+	if (!xnthread_test_state(thread, XNBOOST))
+		__xnsched_rt_track_weakness(thread);
+
+	return ret;
+}
+
+static inline void __xnsched_rt_getparam(struct xnthread *thread,
+					 union xnsched_policy_param *p)
+{
+	p->rt.prio = thread->cprio;
+}
+
+static inline void __xnsched_rt_trackprio(struct xnthread *thread,
+					  const union xnsched_policy_param *p)
+{
+	if (p)
+		thread->cprio = p->rt.prio; /* Force update. */
+	else {
+		thread->cprio = thread->bprio;
+		/* Leaving PI/PP, so non-boosted by definition. */
+		__xnsched_rt_track_weakness(thread);
+	}
+}
+
+static inline void __xnsched_rt_protectprio(struct xnthread *thread, int prio)
+{
+	/*
+	 * The RT class supports the widest priority range from
+	 * XNSCHED_CORE_MIN_PRIO to XNSCHED_CORE_MAX_PRIO inclusive,
+	 * no need to cap the input value which is guaranteed to be in
+	 * the range [1..XNSCHED_CORE_MAX_PRIO].
+	 */
+	thread->cprio = prio;
+}
+
+static inline void __xnsched_rt_forget(struct xnthread *thread)
+{
+}
+
+static inline int xnsched_rt_init_thread(struct xnthread *thread)
+{
+	return 0;
+}
+
+#ifdef CONFIG_XENO_OPT_SCHED_CLASSES
+struct xnthread *xnsched_rt_pick(struct xnsched *sched);
+#else
+static inline struct xnthread *xnsched_rt_pick(struct xnsched *sched)
+{
+	return xnsched_getq(&sched->rt.runnable);
+}
+#endif
+
+void xnsched_rt_tick(struct xnsched *sched);
+
+/** @} */
+
+#endif /* !_COBALT_KERNEL_SCHED_RT_H */
+++ linux-patched/include/xenomai/cobalt/kernel/time.h	2022-03-21 12:58:31.624867167 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/include/xenomai/cobalt/kernel/intr.h	1970-01-01 01:00:00.000000000 +0100
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef _COBALT_KERNEL_TIME_H
+#define _COBALT_KERNEL_TIME_H
+
+#include <linux/time.h>
+#include <linux/time64.h>
+
+/**
+ * Read struct __kernel_timespec from userspace and convert to
+ * struct timespec64
+ *
+ * @param ts The destination, will be filled
+ * @param uts The source, provided by an application
+ * @return 0 on success, -EFAULT otherwise
+ */
+int cobalt_get_timespec64(struct timespec64 *ts,
+			  const struct __kernel_timespec __user *uts);
+
+/**
+ * Convert struct timespec64 to struct __kernel_timespec
+ * and copy to userspace
+ *
+ * @param ts The source, provided by kernel
+ * @param uts The destination, will be filled
+ * @return 0 on success, -EFAULT otherwise
+ */
+int cobalt_put_timespec64(const struct timespec64 *ts,
+			   struct __kernel_timespec __user *uts);
+
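+/*
+ * Illustrative sketch (editor's addition): the usual copy-in/copy-out
+ * pattern around these helpers, with u_ts provided by the caller.
+ *
+ *	struct timespec64 ts;
+ *
+ *	if (cobalt_get_timespec64(&ts, u_ts))
+ *		return -EFAULT;
+ *	// ... operate on ts ...
+ *	return cobalt_put_timespec64(&ts, u_ts);
+ */
+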
+#endif /* !_COBALT_KERNEL_TIME_H */
+++ linux-patched/include/xenomai/cobalt/kernel/intr.h	2022-03-21 12:58:31.617867235 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/include/xenomai/cobalt/kernel/schedqueue.h	1970-01-01 01:00:00.000000000 +0100
+/*
+ * Copyright (C) 2001,2002,2003 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * Xenomai is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published
+ * by the Free Software Foundation; either version 2 of the License,
+ * or (at your option) any later version.
+ *
+ * Xenomai is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Xenomai; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA
+ * 02111-1307, USA.
+ */
+#ifndef _COBALT_KERNEL_INTR_H
+#define _COBALT_KERNEL_INTR_H
+
+#include <linux/spinlock.h>
+#include <cobalt/kernel/stat.h>
+#include <pipeline/irq.h>
+
+/**
+ * @addtogroup cobalt_core_irq
+ * @{
+ */
+
+/* Possible return values of a handler. */
+#define XN_IRQ_NONE	 0x1
+#define XN_IRQ_HANDLED	 0x2
+#define XN_IRQ_STATMASK	 (XN_IRQ_NONE|XN_IRQ_HANDLED)
+#define XN_IRQ_PROPAGATE 0x100
+#define XN_IRQ_DISABLE   0x200
+
+/* Init flags. */
+#define XN_IRQTYPE_SHARED  0x1
+#define XN_IRQTYPE_EDGE    0x2
+
+/* Status bits. */
+#define XN_IRQSTAT_ATTACHED   0
+#define _XN_IRQSTAT_ATTACHED  (1 << XN_IRQSTAT_ATTACHED)
+#define XN_IRQSTAT_DISABLED   1
+#define _XN_IRQSTAT_DISABLED  (1 << XN_IRQSTAT_DISABLED)
+
+struct xnintr;
+struct xnsched;
+
+typedef int (*xnisr_t)(struct xnintr *intr);
+
+typedef void (*xniack_t)(unsigned irq, void *arg);
+
+struct xnirqstat {
+	/** Number of handled receipts since attachment. */
+	xnstat_counter_t hits;
+	/** Runtime accounting entity */
+	xnstat_exectime_t account;
+	/** Accumulated accounting entity */
+	xnstat_exectime_t sum;
+};
+
+struct xnintr {
+#ifdef CONFIG_XENO_OPT_SHIRQ
+	/** Next object in the IRQ-sharing chain. */
+	struct xnintr *next;
+#endif
+	/** Number of consecutive unhandled interrupts */
+	unsigned int unhandled;
+	/** Interrupt service routine. */
+	xnisr_t isr;
+	/** User-defined cookie value. */
+	void *cookie;
+	/** runtime status */
+	unsigned long status;
+	/** Creation flags. */
+	int flags;
+	/** IRQ number. */
+	unsigned int irq;
+	/** Interrupt acknowledge routine. */
+	xniack_t iack;
+	/** Symbolic name. */
+	const char *name;
+	/** Descriptor maintenance lock. */
+	raw_spinlock_t lock;
+#ifdef CONFIG_XENO_OPT_STATS_IRQS
+	/** Statistics. */
+	struct xnirqstat *stats;
+#endif
+};
+
+struct xnintr_iterator {
+    int cpu;		/** Current CPU in iteration. */
+    unsigned long hits;	/** Current hit counter. */
+    xnticks_t exectime_period;	/** Used CPU time in current accounting period. */
+    xnticks_t account_period; /** Length of accounting period. */
+    xnticks_t exectime_total;	/** Overall CPU time consumed. */
+    int list_rev;	/** System-wide xnintr list revision (internal use). */
+    struct xnintr *prev;	/** Previously visited xnintr object (internal use). */
+};
+
+void xnintr_core_clock_handler(void);
+
+void xnintr_host_tick(struct xnsched *sched);
+
+    /* Public interface. */
+
+int xnintr_init(struct xnintr *intr,
+		const char *name,
+		unsigned irq,
+		xnisr_t isr,
+		xniack_t iack,
+		int flags);
+
+void xnintr_destroy(struct xnintr *intr);
+
+int xnintr_attach(struct xnintr *intr,
+		  void *cookie, const cpumask_t *cpumask);
+
+void xnintr_detach(struct xnintr *intr);
+
+void xnintr_enable(struct xnintr *intr);
+
+void xnintr_disable(struct xnintr *intr);
+
+int xnintr_affinity(struct xnintr *intr, const cpumask_t *cpumask);
+
+#ifdef CONFIG_XENO_OPT_STATS_IRQS
+
+int xnintr_query_init(struct xnintr_iterator *iterator);
+
+int xnintr_get_query_lock(void);
+
+void xnintr_put_query_lock(void);
+
+int xnintr_query_next(int irq, struct xnintr_iterator *iterator,
+		      char *name_buf);
+
+#else /* !CONFIG_XENO_OPT_STATS_IRQS */
+
+static inline int xnintr_query_init(struct xnintr_iterator *iterator)
+{
+	return 0;
+}
+
+static inline int xnintr_get_query_lock(void)
+{
+	return 0;
+}
+
+static inline void xnintr_put_query_lock(void) {}
+#endif /* !CONFIG_XENO_OPT_STATS_IRQS */
+
+/** @} */
+
+#endif /* !_COBALT_KERNEL_INTR_H */
+++ linux-patched/include/xenomai/cobalt/kernel/schedqueue.h	2022-03-21 12:58:31.609867313 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/include/xenomai/cobalt/kernel/vdso.h	1970-01-01 01:00:00.000000000 +0100
+/*
+ * Copyright (C) 2008 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * Xenomai is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published
+ * by the Free Software Foundation; either version 2 of the License,
+ * or (at your option) any later version.
+ *
+ * Xenomai is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.	 See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Xenomai; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA
+ * 02111-1307, USA.
+ */
+#ifndef _COBALT_KERNEL_SCHEDQUEUE_H
+#define _COBALT_KERNEL_SCHEDQUEUE_H
+
+#include <cobalt/kernel/list.h>
+
+/**
+ * @addtogroup cobalt_core_sched
+ * @{
+ */
+
+#define XNSCHED_CLASS_WEIGHT_FACTOR	1024
+
+#ifdef CONFIG_XENO_OPT_SCALABLE_SCHED
+
+#include <linux/bitmap.h>
+
+/*
+ * Multi-level priority queue, suitable for handling the runnable
+ * thread queue of the core scheduling class with O(1) property. We
+ * only manage a descending queuing order, i.e. highest numbered
+ * priorities come first.
+ */
+#define XNSCHED_MLQ_LEVELS  260	/* i.e. XNSCHED_CORE_NR_PRIO */
+
+struct xnsched_mlq {
+	int elems;
+	DECLARE_BITMAP(prio_map, XNSCHED_MLQ_LEVELS);
+	struct list_head heads[XNSCHED_MLQ_LEVELS];
+};
+
+struct xnthread;
+
+void xnsched_initq(struct xnsched_mlq *q);
+
+void xnsched_addq(struct xnsched_mlq *q,
+		  struct xnthread *thread);
+
+void xnsched_addq_tail(struct xnsched_mlq *q, 
+		       struct xnthread *thread);
+
+void xnsched_delq(struct xnsched_mlq *q,
+		  struct xnthread *thread);
+
+struct xnthread *xnsched_getq(struct xnsched_mlq *q);
+
+static inline int xnsched_emptyq_p(struct xnsched_mlq *q)
+{
+	return q->elems == 0;
+}
+
+static inline int xnsched_weightq(struct xnsched_mlq *q)
+{
+	return find_first_bit(q->prio_map, XNSCHED_MLQ_LEVELS);
+}
+
+typedef struct xnsched_mlq xnsched_queue_t;
+
+#else /* ! CONFIG_XENO_OPT_SCALABLE_SCHED */
+
+typedef struct list_head xnsched_queue_t;
+
+#define xnsched_initq(__q)			INIT_LIST_HEAD(__q)
+#define xnsched_emptyq_p(__q)			list_empty(__q)
+#define xnsched_addq(__q, __t)			list_add_prilf(__t, __q, cprio, rlink)
+#define xnsched_addq_tail(__q, __t)		list_add_priff(__t, __q, cprio, rlink)
+#define xnsched_delq(__q, __t)			(void)(__q), list_del(&(__t)->rlink)
+#define xnsched_getq(__q)							\
+	({									\
+		struct xnthread *__t = NULL;					\
+		if (!list_empty(__q))						\
+			__t = list_get_entry(__q, struct xnthread, rlink);	\
+		__t;								\
+	})
+#define xnsched_weightq(__q)						\
+	({								\
+		struct xnthread *__t;					\
+		__t = list_first_entry(__q, struct xnthread, rlink);	\
+		__t->cprio;						\
+	})
+	
+
+#endif /* !CONFIG_XENO_OPT_SCALABLE_SCHED */
+
+struct xnthread *xnsched_findq(xnsched_queue_t *q, int prio);
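+
+/*
+ * Illustrative sketch (editor's addition): how a scheduling class
+ * typically drives this queue, here over the RT class per-CPU
+ * runnable queue.
+ *
+ *	xnsched_initq(&sched->rt.runnable);
+ *	xnsched_addq_tail(&sched->rt.runnable, thread);
+ *	next = xnsched_getq(&sched->rt.runnable);	// highest priority first
+ */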
+
+/** @} */
+
+#endif /* !_COBALT_KERNEL_SCHEDQUEUE_H */
+++ linux-patched/include/xenomai/cobalt/kernel/vdso.h	2022-03-21 12:58:31.602867381 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/include/xenomai/cobalt/kernel/pipe.h	1970-01-01 01:00:00.000000000 +0100
+/*
+ * Copyright (C) 2009 Wolfgang Mauerer <wolfgang.mauerer@siemens.com>.
+ *
+ * Xenomai is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published
+ * by the Free Software Foundation; either version 2 of the License,
+ * or (at your option) any later version.
+ *
+ * Xenomai is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Xenomai; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA
+ * 02111-1307, USA.
+ */
+#ifndef _COBALT_KERNEL_VDSO_H
+#define _COBALT_KERNEL_VDSO_H
+
+#include <linux/time.h>
+#include <asm/barrier.h>
+#include <asm/atomic.h>
+#include <asm/processor.h>
+#include <cobalt/uapi/kernel/vdso.h>
+
+extern struct xnvdso *nkvdso;
+
+/*
+ * Define the available feature set here. We have a single feature
+ * defined for now, only in the I-pipe case.
+ */
+#ifdef CONFIG_IPIPE_HAVE_HOSTRT
+
+#define XNVDSO_FEATURES XNVDSO_FEAT_HOST_REALTIME
+
+static inline struct xnvdso_hostrt_data *get_hostrt_data(void)
+{
+	return &nkvdso->hostrt_data;
+}
+
+#else
+
+#define XNVDSO_FEATURES 0
+
+#endif
+
+#endif /* _COBALT_KERNEL_VDSO_H */
+++ linux-patched/include/xenomai/cobalt/kernel/pipe.h	2022-03-21 12:58:31.595867450 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/include/xenomai/cobalt/kernel/ancillaries.h	1970-01-01 01:00:00.000000000 +0100
+/*
+ * Copyright (C) 2001,2002,2003 Philippe Gerum.
+ *
+ * Xenomai is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published
+ * by the Free Software Foundation, Inc., 675 Mass Ave, Cambridge MA
+ * 02139, USA; either version 2 of the License, or (at your option)
+ * any later version.
+ *
+ * Xenomai is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+#ifndef _COBALT_KERNEL_PIPE_H
+#define _COBALT_KERNEL_PIPE_H
+
+#include <linux/types.h>
+#include <linux/poll.h>
+#include <cobalt/kernel/synch.h>
+#include <cobalt/kernel/thread.h>
+#include <cobalt/uapi/kernel/pipe.h>
+
+#define XNPIPE_NDEVS      CONFIG_XENO_OPT_PIPE_NRDEV
+#define XNPIPE_DEV_MAJOR  150
+
+#define XNPIPE_KERN_CONN         0x1
+#define XNPIPE_KERN_LCLOSE       0x2
+#define XNPIPE_USER_CONN         0x4
+#define XNPIPE_USER_SIGIO        0x8
+#define XNPIPE_USER_WREAD        0x10
+#define XNPIPE_USER_WREAD_READY  0x20
+#define XNPIPE_USER_WSYNC        0x40
+#define XNPIPE_USER_WSYNC_READY  0x80
+#define XNPIPE_USER_LCONN        0x100
+
+#define XNPIPE_USER_ALL_WAIT \
+(XNPIPE_USER_WREAD|XNPIPE_USER_WSYNC)
+
+#define XNPIPE_USER_ALL_READY \
+(XNPIPE_USER_WREAD_READY|XNPIPE_USER_WSYNC_READY)
+
+struct xnpipe_mh {
+	size_t size;
+	size_t rdoff;
+	struct list_head link;
+};
+
+struct xnpipe_state;
+
+struct xnpipe_operations {
+	void (*output)(struct xnpipe_mh *mh, void *xstate);
+	int (*input)(struct xnpipe_mh *mh, int retval, void *xstate);
+	void *(*alloc_ibuf)(size_t size, void *xstate);
+	void (*free_ibuf)(void *buf, void *xstate);
+	void (*free_obuf)(void *buf, void *xstate);
+	void (*release)(void *xstate);
+};
+
+struct xnpipe_state {
+	struct list_head slink;	/* Link on sleep queue */
+	struct list_head alink;	/* Link on async queue */
+
+	struct list_head inq;		/* From user-space to kernel */
+	int nrinq;
+	struct list_head outq;		/* From kernel to user-space */
+	int nroutq;
+	struct xnsynch synchbase;
+	struct xnpipe_operations ops;
+	void *xstate;		/* Extra state managed by caller */
+
+	/* Linux kernel part */
+	unsigned long status;
+	struct fasync_struct *asyncq;
+	wait_queue_head_t readq;	/* open/read/poll waiters */
+	wait_queue_head_t syncq;	/* sync waiters */
+	int wcount;			/* number of waiters on this minor */
+	size_t ionrd;
+};
+
+extern struct xnpipe_state xnpipe_states[];
+
+#define xnminor_from_state(s) (s - xnpipe_states)
+
+#ifdef CONFIG_XENO_OPT_PIPE
+int xnpipe_mount(void);
+void xnpipe_umount(void);
+#else /* !CONFIG_XENO_OPT_PIPE */
+static inline int xnpipe_mount(void) { return 0; }
+static inline void xnpipe_umount(void) { }
+#endif /* !CONFIG_XENO_OPT_PIPE */
+
+/* Entry points of the kernel interface. */
+
+int xnpipe_connect(int minor,
+		   struct xnpipe_operations *ops, void *xstate);
+
+int xnpipe_disconnect(int minor);
+
+ssize_t xnpipe_send(int minor,
+		    struct xnpipe_mh *mh, size_t size, int flags);
+
+ssize_t xnpipe_mfixup(int minor, struct xnpipe_mh *mh, ssize_t size);
+
+ssize_t xnpipe_recv(int minor,
+		    struct xnpipe_mh **pmh, xnticks_t timeout);
+
+int xnpipe_flush(int minor, int mode);
+
+int xnpipe_pollstate(int minor, unsigned int *mask_r);
+
+static inline unsigned int __xnpipe_pollstate(int minor)
+{
+	struct xnpipe_state *state = xnpipe_states + minor;
+	unsigned int mask = POLLOUT;
+
+	if (!list_empty(&state->inq))
+		mask |= POLLIN;
+
+	return mask;
+}
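+
+/*
+ * Illustrative sketch (editor's addition): minimal kernel-side usage,
+ * pushing a payload to the user-space reader of minor 0. my_alloc(),
+ * my_free() and the payload layout (data right after the message
+ * header) are placeholders following the usual convention.
+ *
+ *	static struct xnpipe_operations my_ops = {
+ *		.alloc_ibuf = my_alloc,
+ *		.free_ibuf = my_free,
+ *		.free_obuf = my_free,
+ *	};
+ *
+ *	int minor = xnpipe_connect(0, &my_ops, NULL);
+ *	struct xnpipe_mh *mh = my_alloc(sizeof(*mh) + len, NULL);
+ *
+ *	memcpy(xnpipe_m_data(mh), payload, len);
+ *	xnpipe_send(minor, mh, sizeof(*mh) + len, 0);
+ *	...
+ *	xnpipe_disconnect(minor);
+ */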
+
+static inline char *xnpipe_m_data(struct xnpipe_mh *mh)
+{
+	return (char *)(mh + 1);
+}
+
+#define xnpipe_m_size(mh) ((mh)->size)
+
+#define xnpipe_m_rdoff(mh) ((mh)->rdoff)
+
+#endif /* !_COBALT_KERNEL_PIPE_H */
+++ linux-patched/include/xenomai/cobalt/kernel/ancillaries.h	2022-03-21 12:58:31.587867528 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/include/xenomai/cobalt/kernel/bufd.h	1970-01-01 01:00:00.000000000 +0100
+/*
+ * Copyright (C) 2014 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * Xenomai is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published
+ * by the Free Software Foundation; either version 2 of the License,
+ * or (at your option) any later version.
+ *
+ * Xenomai is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Xenomai; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA
+ * 02111-1307, USA.
+ */
+#ifndef _COBALT_KERNEL_ANCILLARIES_H
+#define _COBALT_KERNEL_ANCILLARIES_H
+
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/uidgid.h>
+#include <cobalt/uapi/kernel/limits.h>
+
+#define ksformat(__dst, __len, __fmt, __args...)			\
+	({								\
+		size_t __ret;						\
+		__ret = snprintf(__dst, __len, __fmt, ##__args);	\
+		if (__ret >= __len)					\
+			__dst[__len-1] = '\0';				\
+		__ret;							\
+	})
+
+#define kasformat(__fmt, __args...)					\
+	({								\
+		kasprintf(GFP_KERNEL, __fmt, ##__args);			\
+	})
+
+#define kvsformat(__dst, __len, __fmt, __ap)				\
+	({								\
+		size_t __ret;						\
+		__ret = vsnprintf(__dst, __len, __fmt, __ap);		\
+		if (__ret >= __len)					\
+			__dst[__len-1] = '\0';				\
+		__ret;							\
+	})
+
+#define kvasformat(__fmt, __ap)						\
+	({								\
+		kvasprintf(GFP_KERNEL, __fmt, __ap);			\
+	})
+
+void __knamecpy_requires_character_array_as_destination(void);
+
+#define knamecpy(__dst, __src)						\
+	({								\
+		if (!__builtin_types_compatible_p(typeof(__dst), char[])) \
+			__knamecpy_requires_character_array_as_destination();	\
+		strncpy((__dst), __src, sizeof(__dst));			\
+		__dst[sizeof(__dst) - 1] = '\0';			\
+		__dst;							\
+	 })
+
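+/*
+ * Illustrative sketch (editor's addition): the size-checked helpers in
+ * use; knamecpy() only accepts a character array destination, so no
+ * explicit length argument is needed.
+ *
+ *	char name[XNOBJECT_NAME_LEN];
+ *
+ *	knamecpy(name, "my-timer");
+ *	ksformat(name, sizeof(name), "timer@%d", cpu);
+ */
+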
+#define get_current_uuid() from_kuid_munged(current_user_ns(), current_uid())
+
+#endif /* !_COBALT_KERNEL_ANCILLARIES_H */
+++ linux-patched/include/xenomai/cobalt/kernel/bufd.h	2022-03-21 12:58:31.580867596 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/include/xenomai/cobalt/kernel/sched-quota.h	1970-01-01 01:00:00.000000000 +0100
+/*
+ * Copyright (C) 2009 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * Xenomai is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published
+ * by the Free Software Foundation; either version 2 of the License,
+ * or (at your option) any later version.
+ *
+ * Xenomai is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Xenomai; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA
+ * 02111-1307, USA.
+ */
+#ifndef _COBALT_KERNEL_BUFD_H
+#define _COBALT_KERNEL_BUFD_H
+
+#include <linux/types.h>
+
+/**
+ * @addtogroup cobalt_core_bufd
+ *
+ * @{
+ */
+
+struct mm_struct;
+
+struct xnbufd {
+	caddr_t b_ptr;		/* src/dst buffer address */
+	size_t b_len;		/* total length of buffer */
+	off_t b_off;		/* # of bytes read/written */
+	struct mm_struct *b_mm;	/* src/dst address space */
+	caddr_t b_carry;	/* pointer to carry over area */
+	char b_buf[64];		/* fast carry over area */
+};
+
+void xnbufd_map_umem(struct xnbufd *bufd,
+		     void __user *ptr, size_t len);
+
+static inline void xnbufd_map_uread(struct xnbufd *bufd,
+				    const void __user *ptr, size_t len)
+{
+	xnbufd_map_umem(bufd, (void __user *)ptr, len);
+}
+
+static inline void xnbufd_map_uwrite(struct xnbufd *bufd,
+				     void __user *ptr, size_t len)
+{
+	xnbufd_map_umem(bufd, ptr, len);
+}
+
+ssize_t xnbufd_unmap_uread(struct xnbufd *bufd);
+
+ssize_t xnbufd_unmap_uwrite(struct xnbufd *bufd);
+
+void xnbufd_map_kmem(struct xnbufd *bufd,
+		     void *ptr, size_t len);
+
+static inline void xnbufd_map_kread(struct xnbufd *bufd,
+				    const void *ptr, size_t len)
+{
+	xnbufd_map_kmem(bufd, (void *)ptr, len);
+}
+
+static inline void xnbufd_map_kwrite(struct xnbufd *bufd,
+				     void *ptr, size_t len)
+{
+	xnbufd_map_kmem(bufd, ptr, len);
+}
+
+ssize_t xnbufd_unmap_kread(struct xnbufd *bufd);
+
+ssize_t xnbufd_unmap_kwrite(struct xnbufd *bufd);
+
+ssize_t xnbufd_copy_to_kmem(void *ptr,
+			    struct xnbufd *bufd, size_t len);
+
+ssize_t xnbufd_copy_from_kmem(struct xnbufd *bufd,
+			      void *from, size_t len);
+
+void xnbufd_invalidate(struct xnbufd *bufd);
+
+static inline void xnbufd_reset(struct xnbufd *bufd)
+{
+	bufd->b_off = 0;
+}
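+
+/*
+ * Illustrative sketch (editor's addition): copying a user buffer into
+ * kernel memory through a buffer descriptor, e.g. from a write-like
+ * service. u_buf and len come from the caller.
+ *
+ *	struct xnbufd bufd;
+ *	char kbuf[64];
+ *	ssize_t ret;
+ *
+ *	xnbufd_map_uread(&bufd, u_buf, len);
+ *	ret = xnbufd_copy_to_kmem(kbuf, &bufd, min_t(size_t, len, sizeof(kbuf)));
+ *	xnbufd_unmap_uread(&bufd);
+ */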
+
+/** @} */
+
+#endif /* !_COBALT_KERNEL_BUFD_H */
+++ linux-patched/include/xenomai/cobalt/kernel/sched-quota.h	2022-03-21 12:58:31.572867674 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/include/xenomai/cobalt/kernel/tree.h	1970-01-01 01:00:00.000000000 +0100
+/*
+ * Copyright (C) 2013 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * Xenomai is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published
+ * by the Free Software Foundation; either version 2 of the License,
+ * or (at your option) any later version.
+ *
+ * Xenomai is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Xenomai; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA
+ * 02111-1307, USA.
+ */
+#ifndef _COBALT_KERNEL_SCHED_QUOTA_H
+#define _COBALT_KERNEL_SCHED_QUOTA_H
+
+#ifndef _COBALT_KERNEL_SCHED_H
+#error "please don't include cobalt/kernel/sched-quota.h directly"
+#endif
+
+/**
+ * @addtogroup cobalt_core_sched
+ * @{
+ */
+
+#ifdef CONFIG_XENO_OPT_SCHED_QUOTA
+
+#define XNSCHED_QUOTA_MIN_PRIO	1
+#define XNSCHED_QUOTA_MAX_PRIO	255
+#define XNSCHED_QUOTA_NR_PRIO	\
+	(XNSCHED_QUOTA_MAX_PRIO - XNSCHED_QUOTA_MIN_PRIO + 1)
+
+extern struct xnsched_class xnsched_class_quota;
+
+struct xnsched_quota_group {
+	struct xnsched *sched;
+	xnticks_t quota_ns;
+	xnticks_t quota_peak_ns;
+	xnticks_t run_start_ns;
+	xnticks_t run_budget_ns;
+	xnticks_t run_credit_ns;
+	struct list_head members;
+	struct list_head expired;
+	struct list_head next;
+	int nr_active;
+	int nr_threads;
+	int tgid;
+	int quota_percent;
+	int quota_peak_percent;
+};
+
+struct xnsched_quota {
+	xnticks_t period_ns;
+	struct xntimer refill_timer;
+	struct xntimer limit_timer;
+	struct list_head groups;
+};
+
+static inline int xnsched_quota_init_thread(struct xnthread *thread)
+{
+	thread->quota = NULL;
+	INIT_LIST_HEAD(&thread->quota_expired);
+
+	return 0;
+}
+
+int xnsched_quota_create_group(struct xnsched_quota_group *tg,
+			       struct xnsched *sched,
+			       int *quota_sum_r);
+
+int xnsched_quota_destroy_group(struct xnsched_quota_group *tg,
+				int force,
+				int *quota_sum_r);
+
+void xnsched_quota_set_limit(struct xnsched_quota_group *tg,
+			     int quota_percent, int quota_peak_percent,
+			     int *quota_sum_r);
+
+struct xnsched_quota_group *
+xnsched_quota_find_group(struct xnsched *sched, int tgid);
+
+int xnsched_quota_sum_all(struct xnsched *sched);
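+
+/*
+ * Sketch of group setup, assuming @sched points at the target
+ * per-CPU scheduler and the percentages are illustrative:
+ *
+ *	struct xnsched_quota_group tg;
+ *	int quota_sum, ret;
+ *
+ *	ret = xnsched_quota_create_group(&tg, sched, &quota_sum);
+ *	if (ret == 0)
+ *		xnsched_quota_set_limit(&tg, 20, 50, &quota_sum);
+ */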
+
+#endif /* !CONFIG_XENO_OPT_SCHED_QUOTA */
+
+/** @} */
+
+#endif /* !_COBALT_KERNEL_SCHED_QUOTA_H */
+++ linux-patched/include/xenomai/cobalt/kernel/tree.h	2022-03-21 12:58:31.565867742 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/include/xenomai/cobalt/kernel/sched-sporadic.h	1970-01-01 01:00:00.000000000 +0100
+/*
+ * Copyright (C) 2014 Gilles Chanteperdrix <gilles.chanteperdrix@xenomai.org>.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.	 See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+#ifndef _COBALT_KERNEL_TREE_H
+#define _COBALT_KERNEL_TREE_H
+
+#include <linux/errno.h>
+#include <linux/rbtree.h>
+#include <cobalt/kernel/assert.h>
+
+typedef unsigned long long xnkey_t;
+
+static inline xnkey_t PTR_KEY(void *p)
+{
+	return (xnkey_t)(long)p;
+}
+
+struct xnid {
+	xnkey_t key;
+	struct rb_node link;
+};
+
+#define xnid_entry(ptr, type, member)					\
+	({								\
+		typeof(ptr) _ptr = (ptr);				\
+		(_ptr ? container_of(_ptr, type, member.link) : NULL);	\
+	})
+
+#define xnid_next_entry(ptr, member)				\
+	xnid_entry(rb_next(&ptr->member.link), typeof(*ptr), member)
+
+static inline void xntree_init(struct rb_root *t)
+{
+	*t = RB_ROOT;
+}
+
+#define xntree_for_each_entry(pos, root, member)			\
+	for (pos = xnid_entry(rb_first(root), typeof(*pos), member);	\
+	     pos; pos = xnid_next_entry(pos, member))
+
+void xntree_cleanup(struct rb_root *t, void *cookie,
+		void (*destroy)(void *cookie, struct xnid *id));
+
+int xnid_enter(struct rb_root *t, struct xnid *xnid, xnkey_t key);
+
+static inline xnkey_t xnid_key(struct xnid *i)
+{
+	return i->key;
+}
+
+static inline
+struct xnid *xnid_fetch(struct rb_root *t, xnkey_t key)
+{
+	struct rb_node *node = t->rb_node;
+
+	while (node) {
+		struct xnid *i = container_of(node, struct xnid, link);
+
+		if (key < i->key)
+			node = node->rb_left;
+		else if (key > i->key)
+			node = node->rb_right;
+		else
+			return i;
+	}
+
+	return NULL;
+}
+
+static inline int xnid_remove(struct rb_root *t, struct xnid *xnid)
+{
+#ifdef CONFIG_XENO_OPT_DEBUG_COBALT
+	if (xnid_fetch(t, xnid->key) != xnid)
+		return -ENOENT;
+#endif
+	rb_erase(&xnid->link, t);
+	return 0;
+}
+
+#endif /* _COBALT_KERNEL_TREE_H */
+++ linux-patched/include/xenomai/cobalt/kernel/sched-sporadic.h	2022-03-21 12:58:31.558867811 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/include/xenomai/cobalt/kernel/stat.h	1970-01-01 01:00:00.000000000 +0100
+/*
+ * Copyright (C) 2009 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * Xenomai is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published
+ * by the Free Software Foundation; either version 2 of the License,
+ * or (at your option) any later version.
+ *
+ * Xenomai is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Xenomai; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA
+ * 02111-1307, USA.
+ */
+#ifndef _COBALT_KERNEL_SCHED_SPORADIC_H
+#define _COBALT_KERNEL_SCHED_SPORADIC_H
+
+#ifndef _COBALT_KERNEL_SCHED_H
+#error "please don't include cobalt/kernel/sched-sporadic.h directly"
+#endif
+
+/**
+ * @addtogroup cobalt_core_sched
+ * @{
+ */
+
+#ifdef CONFIG_XENO_OPT_SCHED_SPORADIC
+
+#define XNSCHED_SPORADIC_MIN_PRIO	1
+#define XNSCHED_SPORADIC_MAX_PRIO	255
+#define XNSCHED_SPORADIC_NR_PRIO	\
+	(XNSCHED_SPORADIC_MAX_PRIO - XNSCHED_SPORADIC_MIN_PRIO + 1)
+
+extern struct xnsched_class xnsched_class_sporadic;
+
+struct xnsched_sporadic_repl {
+	xnticks_t date;
+	xnticks_t amount;
+};
+
+struct xnsched_sporadic_data {
+	xnticks_t resume_date;
+	xnticks_t budget;
+	int repl_in;
+	int repl_out;
+	int repl_pending;
+	struct xntimer repl_timer;
+	struct xntimer drop_timer;
+	struct xnsched_sporadic_repl repl_data[CONFIG_XENO_OPT_SCHED_SPORADIC_MAXREPL];
+	struct xnsched_sporadic_param param;
+	struct xnthread *thread;
+};
+
+struct xnsched_sporadic {
+#ifdef CONFIG_XENO_OPT_DEBUG_COBALT
+	unsigned long drop_retries;
+#endif
+};
+
+static inline int xnsched_sporadic_init_thread(struct xnthread *thread)
+{
+	thread->pss = NULL;
+
+	return 0;
+}
+
+#endif /* !CONFIG_XENO_OPT_SCHED_SPORADIC */
+
+/** @} */
+
+#endif /* !_COBALT_KERNEL_SCHED_SPORADIC_H */
+++ linux-patched/include/xenomai/cobalt/kernel/stat.h	2022-03-21 12:58:31.550867889 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/include/xenomai/cobalt/kernel/thread.h	1970-01-01 01:00:00.000000000 +0100
+/*
+ * Copyright (C) 2006 Jan Kiszka <jan.kiszka@web.de>.
+ * Copyright (C) 2006 Dmitry Adamushko <dmitry.adamushko@gmail.com>.
+ *
+ * Xenomai is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published
+ * by the Free Software Foundation; either version 2 of the License,
+ * or (at your option) any later version.
+ *
+ * Xenomai is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Xenomai; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA
+ * 02111-1307, USA.
+ */
+#ifndef _COBALT_KERNEL_STAT_H
+#define _COBALT_KERNEL_STAT_H
+
+#include <cobalt/kernel/clock.h>
+
+/**
+ * @ingroup cobalt_core_thread
+ * @defgroup cobalt_core_stat Thread runtime statistics
+ * @{
+ */
+#ifdef CONFIG_XENO_OPT_STATS
+
+typedef struct xnstat_exectime {
+
+	xnticks_t start;   /* Start of execution time accumulation */
+
+	xnticks_t total; /* Accumulated execution time */
+
+} xnstat_exectime_t;
+
+/* Return current date which can be passed to other xnstat services for
+   immediate or lazy accounting. */
+#define xnstat_exectime_now() xnclock_core_read_raw()
+
+/* Accumulate exectime of the current account until the given date. */
+#define xnstat_exectime_update(sched, date) \
+do { \
+	xnticks_t __date = date; \
+	(sched)->current_account->total += \
+		__date - (sched)->last_account_switch; \
+	(sched)->last_account_switch = __date; \
+	/* All changes must be committed before changing the current_account \
+	   reference in sched (required for xnintr_sync_stat_references) */ \
+	smp_wmb(); \
+} while (0)
+
+/* Update the current account reference, returning the previous one. */
+#define xnstat_exectime_set_current(sched, new_account) \
+({ \
+	xnstat_exectime_t *__prev; \
+	__prev = (xnstat_exectime_t *) \
+		atomic_long_xchg((atomic_long_t *)&(sched)->current_account, \
+				 (long)(new_account)); \
+	__prev; \
+})
+
+/* Return the currently active accounting entity. */
+#define xnstat_exectime_get_current(sched) ((sched)->current_account)
+
+/* Finalize an account (no need to accumulate the exectime, just mark the
+   switch date and set the new account). */
+#define xnstat_exectime_finalize(sched, new_account) \
+do { \
+	(sched)->last_account_switch = xnclock_core_read_raw(); \
+	(sched)->current_account = (new_account); \
+} while (0)
+
+/* Obtain content of xnstat_exectime_t */
+#define xnstat_exectime_get_start(account)	((account)->start)
+#define xnstat_exectime_get_total(account)	((account)->total)
+
+/* Obtain last account switch date of considered sched */
+#define xnstat_exectime_get_last_switch(sched)	((sched)->last_account_switch)
+
+/* Reset statistics from inside the accounted entity (e.g. after CPU
+   migration). */
+#define xnstat_exectime_reset_stats(stat) \
+do { \
+	(stat)->total = 0; \
+	(stat)->start = xnclock_core_read_raw(); \
+} while (0)
+
+typedef struct xnstat_counter {
+	unsigned long counter;
+} xnstat_counter_t;
+
+static inline unsigned long xnstat_counter_inc(xnstat_counter_t *c)
+{
+	return c->counter++;
+}
+
+static inline unsigned long xnstat_counter_get(xnstat_counter_t *c)
+{
+	return c->counter;
+}
+
+static inline void xnstat_counter_set(xnstat_counter_t *c, unsigned long value)
+{
+	c->counter = value;
+}
+
+#else /* !CONFIG_XENO_OPT_STATS */
+typedef struct xnstat_exectime {
+} xnstat_exectime_t;
+
+#define xnstat_exectime_now()					({ 0; })
+#define xnstat_exectime_update(sched, date)			do { } while (0)
+#define xnstat_exectime_set_current(sched, new_account)		({ (void)sched; NULL; })
+#define xnstat_exectime_get_current(sched)			({ (void)sched; NULL; })
+#define xnstat_exectime_finalize(sched, new_account)		do { } while (0)
+#define xnstat_exectime_get_start(account)			({ 0; })
+#define xnstat_exectime_get_total(account)			({ 0; })
+#define xnstat_exectime_get_last_switch(sched)			({ 0; })
+#define xnstat_exectime_reset_stats(account)			do { } while (0)
+
+typedef struct xnstat_counter {
+} xnstat_counter_t;
+
+#define xnstat_counter_inc(c) ({ do { } while(0); 0; })
+#define xnstat_counter_get(c) ({ 0; })
+#define xnstat_counter_set(c, value) do { } while (0)
+#endif /* CONFIG_XENO_OPT_STATS */
+
+/* Account the exectime of the current account until now, switch to
+   new_account, and return the previous one. */
+#define xnstat_exectime_switch(sched, new_account) \
+({ \
+	xnstat_exectime_update(sched, xnstat_exectime_now()); \
+	xnstat_exectime_set_current(sched, new_account); \
+})
+
+/* Account the exectime of the current account until given start time, switch
+   to new_account, and return the previous one. */
+#define xnstat_exectime_lazy_switch(sched, new_account, date) \
+({ \
+	xnstat_exectime_update(sched, date); \
+	xnstat_exectime_set_current(sched, new_account); \
+})
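+
+/*
+ * Typical pattern, e.g. when charging interrupt time to a separate
+ * account (irq_account is illustrative):
+ *
+ *	xnstat_exectime_t *prev;
+ *
+ *	prev = xnstat_exectime_switch(sched, &irq_account);
+ *	... run the handler ...
+ *	xnstat_exectime_switch(sched, prev);
+ */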
+
+/** @} */
+
+#endif /* !_COBALT_KERNEL_STAT_H */
+++ linux-patched/include/xenomai/cobalt/kernel/thread.h	2022-03-21 12:58:31.543867957 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/include/xenomai/cobalt/kernel/select.h	1970-01-01 01:00:00.000000000 +0100
+/*
+ * Copyright (C) 2001,2002,2003 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * Xenomai is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published
+ * by the Free Software Foundation; either version 2 of the License,
+ * or (at your option) any later version.
+ *
+ * Xenomai is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Xenomai; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA
+ * 02111-1307, USA.
+ */
+#ifndef _COBALT_KERNEL_THREAD_H
+#define _COBALT_KERNEL_THREAD_H
+
+#include <linux/wait.h>
+#include <linux/sched.h>
+#include <linux/sched/rt.h>
+#include <pipeline/thread.h>
+#include <pipeline/inband_work.h>
+#include <cobalt/kernel/list.h>
+#include <cobalt/kernel/stat.h>
+#include <cobalt/kernel/timer.h>
+#include <cobalt/kernel/registry.h>
+#include <cobalt/kernel/schedparam.h>
+#include <cobalt/kernel/trace.h>
+#include <cobalt/kernel/synch.h>
+#include <cobalt/uapi/kernel/thread.h>
+#include <cobalt/uapi/signal.h>
+#include <asm/xenomai/machine.h>
+#include <asm/xenomai/thread.h>
+
+/**
+ * @addtogroup cobalt_core_thread
+ * @{
+ */
+#define XNTHREAD_BLOCK_BITS   (XNSUSP|XNPEND|XNDELAY|XNDORMANT|XNRELAX|XNHELD|XNDBGSTOP)
+#define XNTHREAD_MODE_BITS    (XNRRB|XNWARN|XNTRAPLB)
+
+#define XNTHREAD_SIGDEBUG		0
+#define XNTHREAD_SIGSHADOW_HARDEN	1
+#define XNTHREAD_SIGSHADOW_BACKTRACE	2
+#define XNTHREAD_SIGSHADOW_HOME		3
+#define XNTHREAD_SIGTERM		4
+#define XNTHREAD_MAX_SIGNALS		5
+
+struct xnthread;
+struct xnsched;
+struct xnselector;
+struct xnsched_class;
+struct xnsched_tpslot;
+struct xnthread_personality;
+struct completion;
+
+struct lostage_signal {
+	struct pipeline_inband_work inband_work; /* Must be first. */
+	struct task_struct *task;
+	int signo, sigval;
+	struct lostage_signal *self; /* Revisit: I-pipe requirement */
+};
+
+struct xnthread_init_attr {
+	struct xnthread_personality *personality;
+	cpumask_t affinity;
+	int flags;
+	const char *name;
+};
+
+struct xnthread_start_attr {
+	int mode;
+	void (*entry)(void *cookie);
+	void *cookie;
+};
+
+struct xnthread_wait_context {
+	int posted;
+};
+
+struct xnthread_personality {
+	const char *name;
+	unsigned int magic;
+	int xid;
+	atomic_t refcnt;
+	struct {
+		void *(*attach_process)(void);
+		void (*detach_process)(void *arg);
+		void (*map_thread)(struct xnthread *thread);
+		struct xnthread_personality *(*relax_thread)(struct xnthread *thread);
+		struct xnthread_personality *(*harden_thread)(struct xnthread *thread);
+		struct xnthread_personality *(*move_thread)(struct xnthread *thread,
+							    int dest_cpu);
+		struct xnthread_personality *(*exit_thread)(struct xnthread *thread);
+		struct xnthread_personality *(*finalize_thread)(struct xnthread *thread);
+	} ops;
+	struct module *module;
+};
+
+struct xnthread {
+	struct xnarchtcb tcb;	/* Architecture-dependent block */
+
+	__u32 state;		/* Thread state flags */
+	__u32 info;		/* Thread information flags */
+	__u32 local_info;	/* Local thread information flags */
+
+	struct xnsched *sched;		/* Thread scheduler */
+	struct xnsched_class *sched_class; /* Current scheduling class */
+	struct xnsched_class *base_class; /* Base scheduling class */
+
+#ifdef CONFIG_XENO_OPT_SCHED_TP
+	struct xnsched_tpslot *tps;	/* Current partition slot for TP scheduling */
+	struct list_head tp_link;	/* Link in per-sched TP thread queue */
+#endif
+#ifdef CONFIG_XENO_OPT_SCHED_SPORADIC
+	struct xnsched_sporadic_data *pss; /* Sporadic scheduling data. */
+#endif
+#ifdef CONFIG_XENO_OPT_SCHED_QUOTA
+	struct xnsched_quota_group *quota; /* Quota scheduling group. */
+	struct list_head quota_expired;
+	struct list_head quota_next;
+#endif
+	cpumask_t affinity;	/* Processor affinity. */
+
+	/** Base priority (before PI/PP boost) */
+	int bprio;
+
+	/** Current (effective) priority */
+	int cprio;
+
+	/**
+	 * Weighted priority (cprio + scheduling class weight).
+	 */
+	int wprio;
+
+	/** Scheduler lock count. */
+	int lock_count;
+
+	/**
+	 * Thread holder in xnsched run queue. Ordered by
+	 * thread->cprio.
+	 */
+	struct list_head rlink;
+
+	/**
+	 * Thread holder in xnsynch pendq. Prioritized by
+	 * thread->cprio + scheduling class weight.
+	 */
+	struct list_head plink;
+
+	/** Thread holder in global queue. */
+	struct list_head glink;
+
+	/**
+	 * List of xnsynch objects owned by this thread which cause a
+	 * priority boost for one of the following reasons:
+	 *
+	 * - they are currently claimed by other thread(s), so the
+	 * priority inheritance protocol applies (XNSYNCH_PI).
+	 *
+	 * - they require immediate priority ceiling (XNSYNCH_PP).
+	 *
+	 * This list is ordered by decreasing (weighted) thread
+	 * priorities.
+	 */
+	struct list_head boosters;
+
+	struct xnsynch *wchan;		/* Resource the thread pends on */
+
+	struct xnsynch *wwake;		/* Wait channel the thread was resumed from */
+
+	int res_count;			/* Held resources count */
+
+	struct xntimer rtimer;		/* Resource timer */
+
+	struct xntimer ptimer;		/* Periodic timer */
+
+	xnticks_t rrperiod;		/* Allotted round-robin period (ns) */
+
+	struct xnthread_wait_context *wcontext;	/* Active wait context. */
+
+	struct {
+		xnstat_counter_t ssw;	/* Primary -> secondary mode switch count */
+		xnstat_counter_t csw;	/* Context switches (includes secondary -> primary switches) */
+		xnstat_counter_t xsc;	/* Xenomai syscalls */
+		xnstat_counter_t pf;	/* Number of page faults */
+		xnstat_exectime_t account; /* Execution time accounting entity */
+		xnstat_exectime_t lastperiod; /* Interval marker for execution time reports */
+	} stat;
+
+	struct xnselector *selector;    /* For select. */
+
+	xnhandle_t handle;	/* Handle in registry */
+
+	char name[XNOBJECT_NAME_LEN]; /* Symbolic name of thread */
+
+	void (*entry)(void *cookie); /* Thread entry routine */
+	void *cookie;		/* Cookie to pass to the entry routine */
+
+	/**
+	 * Thread data visible from userland through a window on the
+	 * global heap.
+	 */
+	struct xnthread_user_window *u_window;
+
+	struct xnthread_personality *personality;
+
+	struct completion exited;
+
+#ifdef CONFIG_XENO_OPT_DEBUG
+	const char *exe_path;	/* Executable path */
+	u32 proghash;		/* Hash value for exe_path */
+#endif
+	struct lostage_signal sigarray[XNTHREAD_MAX_SIGNALS];
+};
+
+static inline int xnthread_get_state(const struct xnthread *thread)
+{
+	return thread->state;
+}
+
+static inline int xnthread_test_state(struct xnthread *thread, int bits)
+{
+	return thread->state & bits;
+}
+
+static inline void xnthread_set_state(struct xnthread *thread, int bits)
+{
+	thread->state |= bits;
+}
+
+static inline void xnthread_clear_state(struct xnthread *thread, int bits)
+{
+	thread->state &= ~bits;
+}
+
+static inline int xnthread_test_info(struct xnthread *thread, int bits)
+{
+	return thread->info & bits;
+}
+
+static inline void xnthread_set_info(struct xnthread *thread, int bits)
+{
+	thread->info |= bits;
+}
+
+static inline void xnthread_clear_info(struct xnthread *thread, int bits)
+{
+	thread->info &= ~bits;
+}
+
+static inline int xnthread_test_localinfo(struct xnthread *curr, int bits)
+{
+	return curr->local_info & bits;
+}
+
+static inline void xnthread_set_localinfo(struct xnthread *curr, int bits)
+{
+	curr->local_info |= bits;
+}
+
+static inline void xnthread_clear_localinfo(struct xnthread *curr, int bits)
+{
+	curr->local_info &= ~bits;
+}
+
+static inline struct xnarchtcb *xnthread_archtcb(struct xnthread *thread)
+{
+	return &thread->tcb;
+}
+
+static inline int xnthread_base_priority(const struct xnthread *thread)
+{
+	return thread->bprio;
+}
+
+static inline int xnthread_current_priority(const struct xnthread *thread)
+{
+	return thread->cprio;
+}
+
+static inline struct task_struct *xnthread_host_task(struct xnthread *thread)
+{
+	return xnarch_host_task(xnthread_archtcb(thread));
+}
+
+#define xnthread_for_each_booster(__pos, __thread)		\
+	list_for_each_entry(__pos, &(__thread)->boosters, next)
+
+#define xnthread_for_each_booster_safe(__pos, __tmp, __thread)	\
+	list_for_each_entry_safe(__pos, __tmp, &(__thread)->boosters, next)
+
+#define xnthread_run_handler(__t, __h, __a...)				\
+	do {								\
+		struct xnthread_personality *__p__ = (__t)->personality;	\
+		if ((__p__)->ops.__h)					\
+			(__p__)->ops.__h(__t, ##__a);			\
+	} while (0)
+
+#define xnthread_run_handler_stack(__t, __h, __a...)			\
+	do {								\
+		struct xnthread_personality *__p__ = (__t)->personality;	\
+		do {							\
+			if ((__p__)->ops.__h == NULL)			\
+				break;					\
+			__p__ = (__p__)->ops.__h(__t, ##__a);		\
+		} while (__p__);					\
+	} while (0)
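+
+/*
+ * xnthread_run_handler() invokes a single personality handler if
+ * present; xnthread_run_handler_stack() walks the personality stack
+ * instead, each handler returning the next personality to try, or
+ * NULL to stop the walk. For instance:
+ *
+ *	xnthread_run_handler_stack(curr, exit_thread);
+ */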
+
+static inline
+struct xnthread_wait_context *xnthread_get_wait_context(struct xnthread *thread)
+{
+	return thread->wcontext;
+}
+
+static inline
+int xnthread_register(struct xnthread *thread, const char *name)
+{
+	return xnregistry_enter(name, thread, &thread->handle, NULL);
+}
+
+static inline
+struct xnthread *xnthread_lookup(xnhandle_t threadh)
+{
+	struct xnthread *thread = xnregistry_lookup(threadh, NULL);
+	return thread && thread->handle == xnhandle_get_index(threadh) ? thread : NULL;
+}
+
+static inline void xnthread_sync_window(struct xnthread *thread)
+{
+	if (thread->u_window) {
+		thread->u_window->state = thread->state;
+		thread->u_window->info = thread->info;
+	}
+}
+
+static inline
+void xnthread_clear_sync_window(struct xnthread *thread, int state_bits)
+{
+	if (thread->u_window) {
+		thread->u_window->state = thread->state & ~state_bits;
+		thread->u_window->info = thread->info;
+	}
+}
+
+static inline
+void xnthread_set_sync_window(struct xnthread *thread, int state_bits)
+{
+	if (thread->u_window) {
+		thread->u_window->state = thread->state | state_bits;
+		thread->u_window->info = thread->info;
+	}
+}
+
+static inline int normalize_priority(int prio)
+{
+	return prio < MAX_RT_PRIO ? prio : MAX_RT_PRIO - 1;
+}
+
+int __xnthread_init(struct xnthread *thread,
+		    const struct xnthread_init_attr *attr,
+		    struct xnsched *sched,
+		    struct xnsched_class *sched_class,
+		    const union xnsched_policy_param *sched_param);
+
+void __xnthread_test_cancel(struct xnthread *curr);
+
+void __xnthread_cleanup(struct xnthread *curr);
+
+void __xnthread_discard(struct xnthread *thread);
+
+/**
+ * @fn struct xnthread *xnthread_current(void)
+ * @brief Retrieve the current Cobalt core TCB.
+ *
+ * Returns the address of the current Cobalt core thread descriptor,
+ * or NULL if running over a regular Linux task. This call is not
+ * affected by the current runtime mode of the core thread.
+ *
+ * @note The returned value may differ from xnsched_current_thread()
+ * called from the same context, since the latter returns the root
+ * thread descriptor for the current CPU if the caller is running in
+ * secondary mode.
+ *
+ * @coretags{unrestricted}
+ */
+static inline struct xnthread *xnthread_current(void)
+{
+	return pipeline_current()->thread;
+}
+
+/**
+ * @fn struct xnthread *xnthread_from_task(struct task_struct *p)
+ * @brief Retrieve the Cobalt core TCB attached to a Linux task.
+ *
+ * Returns the address of the Cobalt core thread descriptor attached
+ * to the Linux task @a p, or NULL if @a p is a regular Linux
+ * task. This call is not affected by the current runtime mode of the
+ * core thread.
+ *
+ * @coretags{unrestricted}
+ */
+static inline struct xnthread *xnthread_from_task(struct task_struct *p)
+{
+	return pipeline_thread_from_task(p);
+}
+
+/**
+ * @fn void xnthread_test_cancel(void)
+ * @brief Introduce a thread cancellation point.
+ *
+ * Terminates the current thread if a cancellation request is pending
+ * for it, i.e. if xnthread_cancel() was called.
+ *
+ * @coretags{mode-unrestricted}
+ */
+static inline void xnthread_test_cancel(void)
+{
+	struct xnthread *curr = xnthread_current();
+
+	if (curr && xnthread_test_info(curr, XNCANCELD))
+		__xnthread_test_cancel(curr);
+}
+
+static inline
+void xnthread_complete_wait(struct xnthread_wait_context *wc)
+{
+	wc->posted = 1;
+}
+
+static inline
+int xnthread_wait_complete_p(struct xnthread_wait_context *wc)
+{
+	return wc->posted;
+}
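+
+/*
+ * Sketch of the wait-context handshake; "struct getter_wait" is
+ * illustrative. The sleeper embeds a wait context, publishes it with
+ * xnthread_prepare_wait(), then sleeps; the waker fetches it via
+ * xnthread_get_wait_context() and posts it with
+ * xnthread_complete_wait(). Upon return, the sleeper checks
+ * xnthread_wait_complete_p() to tell a real wakeup from a timeout or
+ * an unblock.
+ *
+ *	struct getter_wait {
+ *		struct xnthread_wait_context wc;
+ *		void *data;
+ *	} wait;
+ *
+ *	xnthread_prepare_wait(&wait.wc);
+ *	... sleep on a synch object ...
+ *	if (xnthread_wait_complete_p(&wait.wc))
+ *		... wait.data was posted by the waker ...
+ */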
+
+#ifdef CONFIG_XENO_ARCH_FPU
+void xnthread_switch_fpu(struct xnsched *sched);
+#else
+static inline void xnthread_switch_fpu(struct xnsched *sched) { }
+#endif /* CONFIG_XENO_ARCH_FPU */
+
+void xnthread_deregister(struct xnthread *thread);
+
+char *xnthread_format_status(unsigned long status,
+			     char *buf, int size);
+
+pid_t xnthread_host_pid(struct xnthread *thread);
+
+int xnthread_set_clock(struct xnthread *thread,
+		       struct xnclock *newclock);
+
+xnticks_t xnthread_get_timeout(struct xnthread *thread,
+			       xnticks_t ns);
+
+xnticks_t xnthread_get_period(struct xnthread *thread);
+
+void xnthread_prepare_wait(struct xnthread_wait_context *wc);
+
+int xnthread_init(struct xnthread *thread,
+		  const struct xnthread_init_attr *attr,
+		  struct xnsched_class *sched_class,
+		  const union xnsched_policy_param *sched_param);
+
+int xnthread_start(struct xnthread *thread,
+		   const struct xnthread_start_attr *attr);
+
+int xnthread_set_mode(int clrmask,
+		      int setmask);
+
+void xnthread_suspend(struct xnthread *thread,
+		      int mask,
+		      xnticks_t timeout,
+		      xntmode_t timeout_mode,
+		      struct xnsynch *wchan);
+
+void xnthread_resume(struct xnthread *thread,
+		     int mask);
+
+int xnthread_unblock(struct xnthread *thread);
+
+int xnthread_set_periodic(struct xnthread *thread,
+			  xnticks_t idate,
+			  xntmode_t timeout_mode,
+			  xnticks_t period);
+
+int xnthread_wait_period(unsigned long *overruns_r);
+
+int xnthread_set_slice(struct xnthread *thread,
+		       xnticks_t quantum);
+
+void xnthread_cancel(struct xnthread *thread);
+
+int xnthread_join(struct xnthread *thread, bool uninterruptible);
+
+int xnthread_harden(void);
+
+void xnthread_relax(int notify, int reason);
+
+void __xnthread_kick(struct xnthread *thread);
+
+void xnthread_kick(struct xnthread *thread);
+
+void __xnthread_demote(struct xnthread *thread);
+
+void xnthread_demote(struct xnthread *thread);
+
+void __xnthread_signal(struct xnthread *thread, int sig, int arg);
+
+void xnthread_signal(struct xnthread *thread, int sig, int arg);
+
+void xnthread_pin_initial(struct xnthread *thread);
+
+void xnthread_call_mayday(struct xnthread *thread, int reason);
+
+static inline void xnthread_get_resource(struct xnthread *curr)
+{
+	if (xnthread_test_state(curr, XNWEAK|XNDEBUG))
+		curr->res_count++;
+}
+
+static inline int xnthread_put_resource(struct xnthread *curr)
+{
+	if (xnthread_test_state(curr, XNWEAK) ||
+	    IS_ENABLED(CONFIG_XENO_OPT_DEBUG_MUTEX_SLEEP)) {
+		if (unlikely(curr->res_count == 0)) {
+			if (xnthread_test_state(curr, XNWARN))
+				xnthread_signal(curr, SIGDEBUG,
+						SIGDEBUG_RESCNT_IMBALANCE);
+			return -EPERM;
+		}
+		curr->res_count--;
+	}
+
+	return 0;
+}
+
+static inline void xnthread_commit_ceiling(struct xnthread *curr)
+{
+	if (curr->u_window->pp_pending)
+		xnsynch_commit_ceiling(curr);
+}
+
+#ifdef CONFIG_SMP
+
+void xnthread_migrate_passive(struct xnthread *thread,
+			      struct xnsched *sched);
+#else
+
+static inline void xnthread_migrate_passive(struct xnthread *thread,
+					    struct xnsched *sched)
+{ }
+
+#endif
+
+int __xnthread_set_schedparam(struct xnthread *thread,
+			      struct xnsched_class *sched_class,
+			      const union xnsched_policy_param *sched_param);
+
+int xnthread_set_schedparam(struct xnthread *thread,
+			    struct xnsched_class *sched_class,
+			    const union xnsched_policy_param *sched_param);
+
+int xnthread_killall(int grace, int mask);
+
+void __xnthread_propagate_schedparam(struct xnthread *curr);
+
+static inline void xnthread_propagate_schedparam(struct xnthread *curr)
+{
+	if (xnthread_test_info(curr, XNSCHEDP))
+		__xnthread_propagate_schedparam(curr);
+}
+
+extern struct xnthread_personality xenomai_personality;
+
+/** @} */
+
+#endif /* !_COBALT_KERNEL_THREAD_H */
+++ linux-patched/include/xenomai/cobalt/kernel/select.h	2022-03-21 12:58:31.535868035 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/include/xenomai/cobalt/kernel/lock.h	1970-01-01 01:00:00.000000000 +0100
+/*
+ * Copyright (C) 2008 Efixo <gilles.chanteperdrix@xenomai.org>
+ *
+ * Xenomai is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published
+ * by the Free Software Foundation; either version 2 of the License,
+ * or (at your option) any later version.
+ *
+ * Xenomai is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Xenomai; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA
+ * 02111-1307, USA.
+ */
+#ifndef _COBALT_KERNEL_SELECT_H
+#define _COBALT_KERNEL_SELECT_H
+
+#include <cobalt/kernel/list.h>
+#include <cobalt/kernel/thread.h>
+
+/**
+ * @addtogroup cobalt_core_select
+ * @{
+ */
+
+#define XNSELECT_READ      0
+#define XNSELECT_WRITE     1
+#define XNSELECT_EXCEPT    2
+#define XNSELECT_MAX_TYPES 3
+
+struct xnselector {
+	struct xnsynch synchbase;
+	struct fds {
+		fd_set expected;
+		fd_set pending;
+	} fds[XNSELECT_MAX_TYPES];
+	struct list_head destroy_link;
+	struct list_head bindings; /* only used by xnselector_destroy */
+};
+
+#define __NFDBITS__	(8 * sizeof(unsigned long))
+#define __FDSET_LONGS__	(__FD_SETSIZE/__NFDBITS__)
+#define	__FDELT__(d)	((d) / __NFDBITS__)
+#define	__FDMASK__(d)	(1UL << ((d) % __NFDBITS__))
+
+static inline void __FD_SET__(unsigned long __fd, __kernel_fd_set *__fdsetp)
+{
+        unsigned long __tmp = __fd / __NFDBITS__;
+        unsigned long __rem = __fd % __NFDBITS__;
+        __fdsetp->fds_bits[__tmp] |= (1UL<<__rem);
+}
+
+static inline void __FD_CLR__(unsigned long __fd, __kernel_fd_set *__fdsetp)
+{
+        unsigned long __tmp = __fd / __NFDBITS__;
+        unsigned long __rem = __fd % __NFDBITS__;
+        __fdsetp->fds_bits[__tmp] &= ~(1UL<<__rem);
+}
+
+static inline int __FD_ISSET__(unsigned long __fd, const __kernel_fd_set *__p)
+{
+        unsigned long __tmp = __fd / __NFDBITS__;
+        unsigned long __rem = __fd % __NFDBITS__;
+        return (__p->fds_bits[__tmp] & (1UL<<__rem)) != 0;
+}
+
+static inline void __FD_ZERO__(__kernel_fd_set *__p)
+{
+	unsigned long *__tmp = __p->fds_bits;
+	int __i;
+
+	__i = __FDSET_LONGS__;
+	while (__i) {
+		__i--;
+		*__tmp = 0;
+		__tmp++;
+	}
+}
+
+struct xnselect {
+	struct list_head bindings;
+};
+
+#define DECLARE_XNSELECT(name) struct xnselect name
+
+struct xnselect_binding {
+	struct xnselector *selector;
+	struct xnselect *fd;
+	unsigned int type;
+	unsigned int bit_index;
+	struct list_head link;  /* link in selected fds list. */
+	struct list_head slink; /* link in selector list */
+};
+
+void xnselect_init(struct xnselect *select_block);
+
+int xnselect_bind(struct xnselect *select_block,
+		  struct xnselect_binding *binding,
+		  struct xnselector *selector,
+		  unsigned int type,
+		  unsigned int bit_index,
+		  unsigned int state);
+
+int __xnselect_signal(struct xnselect *select_block, unsigned int state);
+
+/**
+ * Signal a file descriptor state change.
+ *
+ * @param select_block pointer to an @a xnselect structure representing the file
+ * descriptor whose state changed;
+ * @param state new value of the state.
+ *
+ * @retval 1 if rescheduling is needed;
+ * @retval 0 otherwise.
+ */
+static inline int
+xnselect_signal(struct xnselect *select_block, unsigned int state)
+{
+	if (!list_empty(&select_block->bindings))
+		return __xnselect_signal(select_block, state);
+
+	return 0;
+}
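+
+/*
+ * Driver-side sketch; the trailing rescheduling call is the usual
+ * follow-up when 1 is returned, shown here for illustration:
+ *
+ *	DECLARE_XNSELECT(read_select);
+ *	...
+ *	xnselect_init(&read_select);
+ *	...
+ *	if (xnselect_signal(&read_select, new_readable_state))
+ *		xnsched_run();
+ */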
+
+void xnselect_destroy(struct xnselect *select_block);
+
+int xnselector_init(struct xnselector *selector);
+
+int xnselect(struct xnselector *selector,
+	     fd_set *out_fds[XNSELECT_MAX_TYPES],
+	     fd_set *in_fds[XNSELECT_MAX_TYPES],
+	     int nfds,
+	     xnticks_t timeout, xntmode_t timeout_mode);
+
+void xnselector_destroy(struct xnselector *selector);
+
+int xnselect_mount(void);
+
+int xnselect_umount(void);
+
+/** @} */
+
+#endif /* _COBALT_KERNEL_SELECT_H */
+++ linux-patched/include/xenomai/cobalt/kernel/lock.h	2022-03-21 12:58:31.528868103 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/include/xenomai/cobalt/kernel/heap.h	1970-01-01 01:00:00.000000000 +0100
+/*
+ * Copyright (C) 2001-2008,2012 Philippe Gerum <rpm@xenomai.org>.
+ * Copyright (C) 2004,2005 Gilles Chanteperdrix <gilles.chanteperdrix@xenomai.org>.
+ *
+ * Xenomai is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published
+ * by the Free Software Foundation; either version 2 of the License,
+ * or (at your option) any later version.
+ *
+ * Xenomai is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Xenomai; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA
+ * 02111-1307, USA.
+ */
+#ifndef _COBALT_KERNEL_LOCK_H
+#define _COBALT_KERNEL_LOCK_H
+
+#include <pipeline/lock.h>
+#include <linux/percpu.h>
+#include <cobalt/kernel/assert.h>
+#include <pipeline/pipeline.h>
+
+/**
+ * @addtogroup cobalt_core_lock
+ *
+ * @{
+ */
+#ifdef CONFIG_XENO_OPT_DEBUG_LOCKING
+
+struct xnlock {
+	unsigned owner;
+	arch_spinlock_t alock;
+	const char *file;
+	const char *function;
+	unsigned int line;
+	int cpu;
+	unsigned long long spin_time;
+	unsigned long long lock_date;
+};
+
+struct xnlockinfo {
+	unsigned long long spin_time;
+	unsigned long long lock_time;
+	const char *file;
+	const char *function;
+	unsigned int line;
+};
+
+#define XNARCH_LOCK_UNLOCKED (struct xnlock) {	\
+	~0,					\
+	__ARCH_SPIN_LOCK_UNLOCKED,		\
+	NULL,					\
+	NULL,					\
+	0,					\
+	-1,					\
+	0LL,					\
+	0LL,					\
+}
+
+#define XNLOCK_DBG_CONTEXT		, __FILE__, __LINE__, __FUNCTION__
+#define XNLOCK_DBG_CONTEXT_ARGS					\
+	, const char *file, int line, const char *function
+#define XNLOCK_DBG_PASS_CONTEXT		, file, line, function
+
+void xnlock_dbg_prepare_acquire(unsigned long long *start);
+void xnlock_dbg_prepare_spin(unsigned int *spin_limit);
+void xnlock_dbg_acquired(struct xnlock *lock, int cpu,
+			 unsigned long long *start,
+			 const char *file, int line,
+			 const char *function);
+int xnlock_dbg_release(struct xnlock *lock,
+			 const char *file, int line,
+			 const char *function);
+
+DECLARE_PER_CPU(struct xnlockinfo, xnlock_stats);
+
+#else /* !CONFIG_XENO_OPT_DEBUG_LOCKING */
+
+struct xnlock {
+	unsigned owner;
+	arch_spinlock_t alock;
+};
+
+#define XNARCH_LOCK_UNLOCKED			\
+	(struct xnlock) {			\
+		~0,				\
+		__ARCH_SPIN_LOCK_UNLOCKED,	\
+	}
+
+#define XNLOCK_DBG_CONTEXT
+#define XNLOCK_DBG_CONTEXT_ARGS
+#define XNLOCK_DBG_PASS_CONTEXT
+
+static inline
+void xnlock_dbg_prepare_acquire(unsigned long long *start)
+{
+}
+
+static inline
+void xnlock_dbg_prepare_spin(unsigned int *spin_limit)
+{
+}
+
+static inline void
+xnlock_dbg_acquired(struct xnlock *lock, int cpu,
+		    unsigned long long *start)
+{
+}
+
+static inline int xnlock_dbg_release(struct xnlock *lock)
+{
+	return 0;
+}
+
+#endif /* !CONFIG_XENO_OPT_DEBUG_LOCKING */
+
+#if defined(CONFIG_SMP) || defined(CONFIG_XENO_OPT_DEBUG_LOCKING)
+
+#define xnlock_get(lock)		__xnlock_get(lock  XNLOCK_DBG_CONTEXT)
+#define xnlock_put(lock)		__xnlock_put(lock  XNLOCK_DBG_CONTEXT)
+#define xnlock_get_irqsave(lock,x) \
+	((x) = __xnlock_get_irqsave(lock  XNLOCK_DBG_CONTEXT))
+#define xnlock_put_irqrestore(lock,x) \
+	__xnlock_put_irqrestore(lock,x  XNLOCK_DBG_CONTEXT)
+#define xnlock_clear_irqoff(lock)	xnlock_put_irqrestore(lock, 1)
+#define xnlock_clear_irqon(lock)	xnlock_put_irqrestore(lock, 0)
+
+static inline void xnlock_init (struct xnlock *lock)
+{
+	*lock = XNARCH_LOCK_UNLOCKED;
+}
+
+#define DECLARE_XNLOCK(lock)		struct xnlock lock
+#define DECLARE_EXTERN_XNLOCK(lock)	extern struct xnlock lock
+#define DEFINE_XNLOCK(lock)		struct xnlock lock = XNARCH_LOCK_UNLOCKED
+#define DEFINE_PRIVATE_XNLOCK(lock)	static DEFINE_XNLOCK(lock)
+
+static inline int ____xnlock_get(struct xnlock *lock /*, */ XNLOCK_DBG_CONTEXT_ARGS)
+{
+	int cpu = raw_smp_processor_id();
+	unsigned long long start;
+
+	if (lock->owner == cpu)
+		return 2;
+
+	xnlock_dbg_prepare_acquire(&start);
+
+	arch_spin_lock(&lock->alock);
+	lock->owner = cpu;
+
+	xnlock_dbg_acquired(lock, cpu, &start /*, */ XNLOCK_DBG_PASS_CONTEXT);
+
+	return 0;
+}
+
+static inline void ____xnlock_put(struct xnlock *lock /*, */ XNLOCK_DBG_CONTEXT_ARGS)
+{
+	if (xnlock_dbg_release(lock /*, */ XNLOCK_DBG_PASS_CONTEXT))
+		return;
+
+	lock->owner = ~0U;
+	arch_spin_unlock(&lock->alock);
+}
+
+#ifndef CONFIG_XENO_ARCH_OUTOFLINE_XNLOCK
+#define ___xnlock_get ____xnlock_get
+#define ___xnlock_put ____xnlock_put
+#else /* out of line xnlock */
+int ___xnlock_get(struct xnlock *lock /*, */ XNLOCK_DBG_CONTEXT_ARGS);
+
+void ___xnlock_put(struct xnlock *lock /*, */ XNLOCK_DBG_CONTEXT_ARGS);
+#endif /* out of line xnlock */
+
+static inline spl_t
+__xnlock_get_irqsave(struct xnlock *lock /*, */ XNLOCK_DBG_CONTEXT_ARGS)
+{
+	unsigned long flags;
+
+	splhigh(flags);
+
+	if (__locking_active__)
+		flags |= ___xnlock_get(lock /*, */ XNLOCK_DBG_PASS_CONTEXT);
+
+	return flags;
+}
+
+static inline void __xnlock_put_irqrestore(struct xnlock *lock, spl_t flags
+					   /*, */ XNLOCK_DBG_CONTEXT_ARGS)
+{
+	/* Only release the lock if we didn't take it recursively. */
+	if (__locking_active__ && !(flags & 2))
+		___xnlock_put(lock /*, */ XNLOCK_DBG_PASS_CONTEXT);
+
+	splexit(flags & 1);
+}
+
+static inline int xnlock_is_owner(struct xnlock *lock)
+{
+	if (__locking_active__)
+		return lock->owner == raw_smp_processor_id();
+
+	return 1;
+}
+
+static inline int __xnlock_get(struct xnlock *lock /*, */ XNLOCK_DBG_CONTEXT_ARGS)
+{
+	if (__locking_active__)
+		return ___xnlock_get(lock /* , */ XNLOCK_DBG_PASS_CONTEXT);
+
+	return 0;
+}
+
+static inline void __xnlock_put(struct xnlock *lock /*, */ XNLOCK_DBG_CONTEXT_ARGS)
+{
+	if (__locking_active__)
+		___xnlock_put(lock /*, */ XNLOCK_DBG_PASS_CONTEXT);
+}
+
+#undef __locking_active__
+
+#else /* !(CONFIG_SMP || CONFIG_XENO_OPT_DEBUG_LOCKING) */
+
+#define xnlock_init(lock)		do { } while(0)
+#define xnlock_get(lock)		do { } while(0)
+#define xnlock_put(lock)		do { } while(0)
+#define xnlock_get_irqsave(lock,x)	splhigh(x)
+#define xnlock_put_irqrestore(lock,x)	splexit(x)
+#define xnlock_clear_irqoff(lock)	splmax()
+#define xnlock_clear_irqon(lock)	splnone()
+#define xnlock_is_owner(lock)		1
+
+#define DECLARE_XNLOCK(lock)
+#define DECLARE_EXTERN_XNLOCK(lock)
+#define DEFINE_XNLOCK(lock)
+#define DEFINE_PRIVATE_XNLOCK(lock)
+
+#endif /* !(CONFIG_SMP || CONFIG_XENO_OPT_DEBUG_LOCKING) */
+
+DECLARE_EXTERN_XNLOCK(nklock);
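+
+/*
+ * The usual locking pattern around core state protected by nklock:
+ *
+ *	spl_t s;
+ *
+ *	xnlock_get_irqsave(&nklock, s);
+ *	... access/modify core data ...
+ *	xnlock_put_irqrestore(&nklock, s);
+ */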
+
+/** @} */
+
+#endif /* !_COBALT_KERNEL_LOCK_H */
+++ linux-patched/include/xenomai/cobalt/kernel/heap.h	2022-03-21 12:58:31.521868171 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/include/xenomai/cobalt/kernel/trace.h	1970-01-01 01:00:00.000000000 +0100
+/*
+ * Copyright (C) 2001,2002,2003 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * Xenomai is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published
+ * by the Free Software Foundation; either version 2 of the License,
+ * or (at your option) any later version.
+ *
+ * Xenomai is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Xenomai; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA
+ * 02111-1307, USA.
+ */
+#ifndef _COBALT_KERNEL_HEAP_H
+#define _COBALT_KERNEL_HEAP_H
+
+#include <linux/string.h>
+#include <linux/rbtree.h>
+#include <cobalt/kernel/lock.h>
+#include <cobalt/kernel/list.h>
+#include <cobalt/uapi/kernel/types.h>
+#include <cobalt/uapi/kernel/heap.h>
+
+/**
+ * @addtogroup cobalt_core_heap
+ * @{
+ */
+
+#define XNHEAP_PAGE_SHIFT	9 /* 2^9 => 512 bytes */
+#define XNHEAP_PAGE_SIZE	(1UL << XNHEAP_PAGE_SHIFT)
+#define XNHEAP_PAGE_MASK	(~(XNHEAP_PAGE_SIZE - 1))
+#define XNHEAP_MIN_LOG2		4 /* 16 bytes */
+/*
+ * Use bucketed memory for sizes between 2^XNHEAP_MIN_LOG2 and
+ * 2^(XNHEAP_PAGE_SHIFT-1).
+ */
+#define XNHEAP_MAX_BUCKETS	(XNHEAP_PAGE_SHIFT - XNHEAP_MIN_LOG2)
+#define XNHEAP_MIN_ALIGN	(1U << XNHEAP_MIN_LOG2)
+/* Maximum size of a heap (4GB - PAGE_SIZE). */
+#define XNHEAP_MAX_HEAPSZ	(4294967295U - PAGE_SIZE + 1)
+/* Bits we need for encoding a page # */
+#define XNHEAP_PGENT_BITS      (32 - XNHEAP_PAGE_SHIFT)
+/* Each page is represented by a page map entry. */
+#define XNHEAP_PGMAP_BYTES	sizeof(struct xnheap_pgentry)
+
+struct xnheap_pgentry {
+	/* Linkage in bucket list. */
+	unsigned int prev : XNHEAP_PGENT_BITS;
+	unsigned int next : XNHEAP_PGENT_BITS;
+	/*  page_list or log2. */
+	unsigned int type : 6;
+	/*
+	 * We hold either a spatial map of busy blocks within the page
+	 * for bucketed memory (up to 32 blocks per page), or the
+	 * overall size of the multi-page block if entry.type ==
+	 * page_list.
+	 */
+	union {
+		u32 map;
+		u32 bsize;
+	};
+};
+
+/*
+ * A range descriptor is stored at the beginning of the first page of
+ * a range of free pages. xnheap_range.size is nrpages *
+ * XNHEAP_PAGE_SIZE. Ranges are indexed by address and size in
+ * rbtrees.
+ */
+struct xnheap_range {
+	struct rb_node addr_node;
+	struct rb_node size_node;
+	size_t size;
+};
+
+struct xnheap {
+	void *membase;
+	struct rb_root addr_tree;
+	struct rb_root size_tree;
+	struct xnheap_pgentry *pagemap;
+	size_t usable_size;
+	size_t used_size;
+	u32 buckets[XNHEAP_MAX_BUCKETS];
+	char name[XNOBJECT_NAME_LEN];
+	DECLARE_XNLOCK(lock);
+	struct list_head next;
+};
+
+extern struct xnheap cobalt_heap;
+
+#define xnmalloc(size)     xnheap_alloc(&cobalt_heap, size)
+#define xnfree(ptr)        xnheap_free(&cobalt_heap, ptr)
+
+static inline void *xnheap_get_membase(const struct xnheap *heap)
+{
+	return heap->membase;
+}
+
+static inline
+size_t xnheap_get_size(const struct xnheap *heap)
+{
+	return heap->usable_size;
+}
+
+static inline
+size_t xnheap_get_used(const struct xnheap *heap)
+{
+	return heap->used_size;
+}
+
+static inline
+size_t xnheap_get_free(const struct xnheap *heap)
+{
+	return heap->usable_size - heap->used_size;
+}
+
+int xnheap_init(struct xnheap *heap,
+		void *membase, size_t size);
+
+void xnheap_destroy(struct xnheap *heap);
+
+void *xnheap_alloc(struct xnheap *heap, size_t size);
+
+void xnheap_free(struct xnheap *heap, void *block);
+
+ssize_t xnheap_check_block(struct xnheap *heap, void *block);
+
+void xnheap_set_name(struct xnheap *heap,
+		     const char *name, ...);
+
+void *xnheap_vmalloc(size_t size);
+
+void xnheap_vfree(void *p);
+
+static inline void *xnheap_zalloc(struct xnheap *heap, size_t size)
+{
+	void *p;
+
+	p = xnheap_alloc(heap, size);
+	if (p)
+		memset(p, 0, size);
+
+	return p;
+}
+
+static inline char *xnstrdup(const char *s)
+{
+	char *p;
+
+	p = xnmalloc(strlen(s) + 1);
+	if (p == NULL)
+		return NULL;
+
+	return strcpy(p, s);
+}
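+
+/*
+ * Life-cycle sketch (error checking omitted), assuming @size is a
+ * multiple of XNHEAP_PAGE_SIZE and below XNHEAP_MAX_HEAPSZ:
+ *
+ *	struct xnheap heap;
+ *	void *mem, *p;
+ *
+ *	mem = xnheap_vmalloc(size);
+ *	xnheap_init(&heap, mem, size);
+ *	xnheap_set_name(&heap, "my-heap");
+ *	p = xnheap_alloc(&heap, 128);
+ *	...
+ *	xnheap_free(&heap, p);
+ *	xnheap_destroy(&heap);
+ *	xnheap_vfree(mem);
+ */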
+
+#ifdef CONFIG_XENO_OPT_VFILE
+void xnheap_init_proc(void);
+void xnheap_cleanup_proc(void);
+#else /* !CONFIG_XENO_OPT_VFILE */
+static inline void xnheap_init_proc(void) { }
+static inline void xnheap_cleanup_proc(void) { }
+#endif /* !CONFIG_XENO_OPT_VFILE */
+
+/** @} */
+
+#endif /* !_COBALT_KERNEL_HEAP_H */
+++ linux-patched/include/xenomai/cobalt/kernel/trace.h	2022-03-21 12:58:31.513868249 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/include/xenomai/cobalt/kernel/clock.h	1970-01-01 01:00:00.000000000 +0100
+/*
+ * SPDX-License-Identifier: GPL-2.0
+ */
+
+#ifndef _COBALT_KERNEL_TRACE_H
+#define _COBALT_KERNEL_TRACE_H
+
+#include <pipeline/trace.h>
+
+#endif /* !_COBALT_KERNEL_TRACE_H */
+++ linux-patched/include/xenomai/cobalt/kernel/clock.h	2022-03-21 12:58:31.506868318 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/include/xenomai/cobalt/kernel/list.h	1970-01-01 01:00:00.000000000 +0100
+/*
+ * Copyright (C) 2006,2007 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * Xenomai is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published
+ * by the Free Software Foundation; either version 2 of the License,
+ * or (at your option) any later version.
+ *
+ * Xenomai is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Xenomai; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA
+ * 02111-1307, USA.
+ */
+#ifndef _COBALT_KERNEL_CLOCK_H
+#define _COBALT_KERNEL_CLOCK_H
+
+#include <pipeline/pipeline.h>
+#include <pipeline/clock.h>
+#include <cobalt/kernel/list.h>
+#include <cobalt/kernel/vfile.h>
+#include <cobalt/uapi/kernel/types.h>
+#include <asm/xenomai/wrappers.h>
+
+/**
+ * @addtogroup cobalt_core_clock
+ * @{
+ */
+
+struct xnsched;
+struct xntimerdata;
+struct __kernel_timex;
+
+struct xnclock_gravity {
+	unsigned long irq;
+	unsigned long kernel;
+	unsigned long user;
+};
+
+struct xnclock {
+	/** (ns) */
+	xnsticks_t wallclock_offset;
+	/** (ns) */
+	xnticks_t resolution;
+	/** (raw clock ticks). */
+	struct xnclock_gravity gravity;
+	/** Clock name. */
+	const char *name;
+	struct {
+#ifdef CONFIG_XENO_OPT_EXTCLOCK
+		xnticks_t (*read_raw)(struct xnclock *clock);
+		xnticks_t (*read_monotonic)(struct xnclock *clock);
+		int (*set_time)(struct xnclock *clock,
+				const struct timespec64 *ts);
+		xnsticks_t (*ns_to_ticks)(struct xnclock *clock,
+					  xnsticks_t ns);
+		xnsticks_t (*ticks_to_ns)(struct xnclock *clock,
+					  xnsticks_t ticks);
+		xnsticks_t (*ticks_to_ns_rounded)(struct xnclock *clock,
+						  xnsticks_t ticks);
+		void (*program_local_shot)(struct xnclock *clock,
+					   struct xnsched *sched);
+		void (*program_remote_shot)(struct xnclock *clock,
+					    struct xnsched *sched);
+#endif
+		int (*adjust_time)(struct xnclock *clock,
+				   struct __kernel_timex *tx);
+		int (*set_gravity)(struct xnclock *clock,
+				   const struct xnclock_gravity *p);
+		void (*reset_gravity)(struct xnclock *clock);
+#ifdef CONFIG_XENO_OPT_VFILE
+		void (*print_status)(struct xnclock *clock,
+				     struct xnvfile_regular_iterator *it);
+#endif
+	} ops;
+	/* Private section. */
+	struct xntimerdata *timerdata;
+	int id;
+#ifdef CONFIG_SMP
+	/** Possible CPU affinity of clock beat. */
+	cpumask_t affinity;
+#endif
+#ifdef CONFIG_XENO_OPT_STATS
+	struct xnvfile_snapshot timer_vfile;
+	struct xnvfile_rev_tag timer_revtag;
+	struct list_head timerq;
+	int nrtimers;
+#endif /* CONFIG_XENO_OPT_STATS */
+#ifdef CONFIG_XENO_OPT_VFILE
+	struct xnvfile_regular vfile;
+#endif
+};
+
+struct xnclock_ratelimit_state {
+	xnticks_t interval;
+	xnticks_t begin;
+	int burst;
+	int printed;
+	int missed;
+};
+
+extern struct xnclock nkclock;
+
+int xnclock_register(struct xnclock *clock,
+		     const cpumask_t *affinity);
+
+void xnclock_deregister(struct xnclock *clock);
+
+void xnclock_tick(struct xnclock *clock);
+
+void xnclock_core_local_shot(struct xnsched *sched);
+
+void xnclock_core_remote_shot(struct xnsched *sched);
+
+xnsticks_t xnclock_core_ns_to_ticks(xnsticks_t ns);
+
+xnsticks_t xnclock_core_ticks_to_ns(xnsticks_t ticks);
+
+xnsticks_t xnclock_core_ticks_to_ns_rounded(xnsticks_t ticks);
+
+xnticks_t xnclock_core_read_monotonic(void);
+
+static inline xnticks_t xnclock_core_read_raw(void)
+{
+	return pipeline_read_cycle_counter();
+}
+
+/* We use the Linux defaults */
+#define XN_RATELIMIT_INTERVAL	5000000000LL
+#define XN_RATELIMIT_BURST	10
+
+int __xnclock_ratelimit(struct xnclock_ratelimit_state *rs, const char *func);
+
+#define xnclock_ratelimit()	({					\
+	static struct xnclock_ratelimit_state __state = {		\
+		.interval	= XN_RATELIMIT_INTERVAL,		\
+		.burst		= XN_RATELIMIT_BURST,			\
+	};								\
+	__xnclock_ratelimit(&__state, __func__);			\
+})
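+
+/*
+ * Typical use, mirroring the in-kernel printk_ratelimit() idiom:
+ *
+ *	if (xnclock_ratelimit())
+ *		printk(KERN_WARNING "something suspicious happened\n");
+ */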
+
+#ifdef CONFIG_XENO_OPT_EXTCLOCK
+
+static inline void xnclock_program_shot(struct xnclock *clock,
+					struct xnsched *sched)
+{
+	if (likely(clock == &nkclock))
+		xnclock_core_local_shot(sched);
+	else if (clock->ops.program_local_shot)
+		clock->ops.program_local_shot(clock, sched);
+}
+
+static inline void xnclock_remote_shot(struct xnclock *clock,
+				       struct xnsched *sched)
+{
+#ifdef CONFIG_SMP
+	if (likely(clock == &nkclock))
+		xnclock_core_remote_shot(sched);
+	else if (clock->ops.program_remote_shot)
+		clock->ops.program_remote_shot(clock, sched);
+#endif
+}
+
+static inline xnticks_t xnclock_read_raw(struct xnclock *clock)
+{
+	if (likely(clock == &nkclock))
+		return xnclock_core_read_raw();
+
+	return clock->ops.read_raw(clock);
+}
+
+static inline xnsticks_t xnclock_ns_to_ticks(struct xnclock *clock,
+					     xnsticks_t ns)
+{
+	if (likely(clock == &nkclock))
+		return xnclock_core_ns_to_ticks(ns);
+
+	return clock->ops.ns_to_ticks(clock, ns);
+}
+
+static inline xnsticks_t xnclock_ticks_to_ns(struct xnclock *clock,
+					     xnsticks_t ticks)
+{
+	if (likely(clock == &nkclock))
+		return xnclock_core_ticks_to_ns(ticks);
+
+	return clock->ops.ticks_to_ns(clock, ticks);
+}
+
+static inline xnsticks_t xnclock_ticks_to_ns_rounded(struct xnclock *clock,
+						     xnsticks_t ticks)
+{
+	if (likely(clock == &nkclock))
+		return xnclock_core_ticks_to_ns_rounded(ticks);
+
+	return clock->ops.ticks_to_ns_rounded(clock, ticks);
+}
+
+static inline xnticks_t xnclock_read_monotonic(struct xnclock *clock)
+{
+	if (likely(clock == &nkclock))
+		return xnclock_core_read_monotonic();
+
+	return clock->ops.read_monotonic(clock);
+}
+
+static inline int xnclock_set_time(struct xnclock *clock,
+				   const struct timespec64 *ts)
+{
+	if (likely(clock == &nkclock))
+		return -EINVAL;
+
+	return clock->ops.set_time(clock, ts);
+}
+
+#else /* !CONFIG_XENO_OPT_EXTCLOCK */
+
+static inline void xnclock_program_shot(struct xnclock *clock,
+					struct xnsched *sched)
+{
+	xnclock_core_local_shot(sched);
+}
+
+static inline void xnclock_remote_shot(struct xnclock *clock,
+				       struct xnsched *sched)
+{
+#ifdef CONFIG_SMP
+	xnclock_core_remote_shot(sched);
+#endif
+}
+
+static inline xnticks_t xnclock_read_raw(struct xnclock *clock)
+{
+	return xnclock_core_read_raw();
+}
+
+static inline xnsticks_t xnclock_ns_to_ticks(struct xnclock *clock,
+					     xnsticks_t ns)
+{
+	return xnclock_core_ns_to_ticks(ns);
+}
+
+static inline xnsticks_t xnclock_ticks_to_ns(struct xnclock *clock,
+					     xnsticks_t ticks)
+{
+	return xnclock_core_ticks_to_ns(ticks);
+}
+
+static inline xnsticks_t xnclock_ticks_to_ns_rounded(struct xnclock *clock,
+						     xnsticks_t ticks)
+{
+	return xnclock_core_ticks_to_ns_rounded(ticks);
+}
+
+static inline xnticks_t xnclock_read_monotonic(struct xnclock *clock)
+{
+	return xnclock_core_read_monotonic();
+}
+
+static inline int xnclock_set_time(struct xnclock *clock,
+				   const struct timespec64 *ts)
+{
+	/*
+	 * There is no way to change the core clock's idea of time.
+	 */
+	return -EINVAL;
+}
+
+#endif /* !CONFIG_XENO_OPT_EXTCLOCK */
+
+static inline int xnclock_adjust_time(struct xnclock *clock,
+				      struct __kernel_timex *tx)
+{
+	if (clock->ops.adjust_time == NULL)
+		return -EOPNOTSUPP;
+
+	return clock->ops.adjust_time(clock, tx);
+}
+
+static inline xnticks_t xnclock_get_offset(struct xnclock *clock)
+{
+	return clock->wallclock_offset;
+}
+
+static inline xnticks_t xnclock_get_resolution(struct xnclock *clock)
+{
+	return clock->resolution; /* ns */
+}
+
+static inline void xnclock_set_resolution(struct xnclock *clock,
+					  xnticks_t resolution)
+{
+	clock->resolution = resolution; /* ns */
+}
+
+static inline int xnclock_set_gravity(struct xnclock *clock,
+				      const struct xnclock_gravity *gravity)
+{
+	if (clock->ops.set_gravity)
+		return clock->ops.set_gravity(clock, gravity);
+
+	return -EINVAL;
+}
+
+static inline void xnclock_reset_gravity(struct xnclock *clock)
+{
+	if (clock->ops.reset_gravity)
+		clock->ops.reset_gravity(clock);
+}
+
+#define xnclock_get_gravity(__clock, __type)  ((__clock)->gravity.__type)
+
+static inline xnticks_t xnclock_read_realtime(struct xnclock *clock)
+{
+	if (likely(clock == &nkclock))
+		return pipeline_read_wallclock();
+	/*
+	 * Return an adjusted value of the monotonic time with the
+	 * translated system wallclock offset.
+	 */
+	return xnclock_read_monotonic(clock) + xnclock_get_offset(clock);
+}
+
+void xnclock_apply_offset(struct xnclock *clock,
+			  xnsticks_t delta_ns);
+
+void xnclock_set_wallclock(xnticks_t epoch_ns);
+
+unsigned long long xnclock_divrem_billion(unsigned long long value,
+					  unsigned long *rem);
+
+#ifdef CONFIG_XENO_OPT_VFILE
+
+void xnclock_init_proc(void);
+
+void xnclock_cleanup_proc(void);
+
+static inline void xnclock_print_status(struct xnclock *clock,
+					struct xnvfile_regular_iterator *it)
+{
+	if (clock->ops.print_status)
+		clock->ops.print_status(clock, it);
+}
+
+#else
+static inline void xnclock_init_proc(void) { }
+static inline void xnclock_cleanup_proc(void) { }
+#endif
+
+int xnclock_init(void);
+
+void xnclock_cleanup(void);
+
+/** @} */
+
+#endif /* !_COBALT_KERNEL_CLOCK_H */
+++ linux-patched/include/xenomai/cobalt/kernel/list.h	2022-03-21 12:58:31.498868396 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/include/linux/xenomai/wrappers.h	1970-01-01 01:00:00.000000000 +0100
+/*
+ * Copyright (C) 2013 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * Xenomai is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published
+ * by the Free Software Foundation; either version 2 of the License,
+ * or (at your option) any later version.
+ *
+ * Xenomai is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Xenomai; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA
+ * 02111-1307, USA.
+ */
+#ifndef _COBALT_KERNEL_LIST_H
+#define _COBALT_KERNEL_LIST_H
+
+#include <linux/list.h>
+
+#define __list_add_pri(__new, __head, __member_pri, __member_next, __relop)	\
+do {										\
+	typeof(*__new) *__pos;							\
+	if (list_empty(__head))							\
+		list_add(&(__new)->__member_next, __head);		 	\
+	else {									\
+		list_for_each_entry_reverse(__pos, __head, __member_next) {	\
+			if ((__new)->__member_pri __relop __pos->__member_pri)	\
+				break;						\
+		}								\
+		list_add(&(__new)->__member_next, &__pos->__member_next); 	\
+	}									\
+} while (0)
+
+#define list_add_priff(__new, __head, __member_pri, __member_next)		\
+	__list_add_pri(__new, __head, __member_pri, __member_next, <=)
+
+#define list_add_prilf(__new, __head, __member_pri, __member_next)		\
+	__list_add_pri(__new, __head, __member_pri, __member_next, <)
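+
+/*
+ * Both helpers keep a list sorted by decreasing priority; with
+ * list_add_priff() the new entry goes after existing entries of
+ * equal priority (FIFO among equals), while list_add_prilf() puts it
+ * before them (LIFO among equals). Sketch, with an illustrative
+ * element type:
+ *
+ *	struct item { int prio; struct list_head next; };
+ *
+ *	list_add_priff(newitem, &queue, prio, next);
+ */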
+
+#define list_get_entry(__head, __type, __member)		\
+  ({								\
+	  __type *__item;					\
+	  __item = list_first_entry(__head, __type, __member);	\
+	  list_del(&__item->__member);				\
+	  __item;						\
+  })
+
+#define list_get_entry_init(__head, __type, __member)		\
+  ({								\
+	  __type *__item;					\
+	  __item = list_first_entry(__head, __type, __member);	\
+	  list_del_init(&__item->__member);			\
+	  __item;						\
+  })
+
+#ifndef list_next_entry
+#define list_next_entry(__item, __member)			\
+	list_entry((__item)->__member.next, typeof(*(__item)), __member)
+#endif
+
+#endif /* !_COBALT_KERNEL_LIST_H */
+++ linux-patched/include/linux/xenomai/wrappers.h	2022-03-21 12:58:28.942893320 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/include/asm-generic/xenomai/wrappers.h	1970-01-01 01:00:00.000000000 +0100
+/*
+ * Copyright (C) 2017 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * Xenomai is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * Xenomai is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Xenomai; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA
+ * 02111-1307, USA.
+ */
+#ifndef _COBALT_LINUX_WRAPPERS_H
+#define _COBALT_LINUX_WRAPPERS_H
+
+#include <linux/version.h>
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 20, 0)
+#include <linux/signal.h>
+typedef siginfo_t kernel_siginfo_t;
+#endif
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4,11,0)
+#include <linux/sched.h>
+#include <linux/sched/rt.h>
+#else
+#include <linux/sched.h>
+#include <linux/sched/signal.h>
+#include <linux/sched/rt.h>
+#include <linux/sched/mm.h>
+#include <linux/sched/debug.h>
+#include <linux/sched/task_stack.h>
+#include <uapi/linux/sched/types.h>
+#endif
+
+#include <pipeline/wrappers.h>
+
+#endif /* !_COBALT_LINUX_WRAPPERS_H */
+++ linux-patched/include/asm-generic/xenomai/wrappers.h	2022-03-21 12:58:28.937893369 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/include/asm-generic/xenomai/syscall.h	1970-01-01 01:00:00.000000000 +0100
+/*
+ * Copyright (C) 2005-2012 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * Xenomai is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published
+ * by the Free Software Foundation; either version 2 of the License,
+ * or (at your option) any later version.
+ *
+ * Xenomai is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Xenomai; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA
+ * 02111-1307, USA.
+ */
+#ifndef _COBALT_ASM_GENERIC_WRAPPERS_H
+#define _COBALT_ASM_GENERIC_WRAPPERS_H
+
+#include <linux/xenomai/wrappers.h>
+
+#define COBALT_BACKPORT(__sym) __cobalt_backport_ ##__sym
+
+/*
+ * To keep the #ifdefery as readable as possible, please:
+ *
+ * - keep the conditional structure flat, no nesting (e.g. do not fold
+ *   the pre-3.11 conditions into the pre-3.14 ones).
+ * - group all wrappers for a single kernel revision.
+ * - list conditional blocks in order of kernel release, latest first
+ * - identify the first kernel release for which the wrapper should
+ *   be defined, instead of testing the existence of a preprocessor
+ *   symbol, so that obsolete wrappers can be spotted.
+ */
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4,11,0)
+#define raw_copy_to_user(__to, __from, __n)	__copy_to_user_inatomic(__to, __from, __n)
+#define raw_copy_from_user(__to, __from, __n)	__copy_from_user_inatomic(__to, __from, __n)
+#define raw_put_user(__from, __to)		__put_user_inatomic(__from, __to)
+#define raw_get_user(__to, __from)		__get_user_inatomic(__to, __from)
+#endif
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4,6,0)
+#define in_ia32_syscall() (current_thread_info()->status & TS_COMPAT)
+#endif
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4,5,0)
+#define cobalt_gpiochip_dev(__gc)	((__gc)->dev)
+#else
+#define cobalt_gpiochip_dev(__gc)	((__gc)->parent)
+#endif
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4,0,0)
+#define cobalt_get_restart_block(p)	(&task_thread_info(p)->restart_block)
+#else
+#define cobalt_get_restart_block(p)	(&(p)->restart_block)
+#endif
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(3,19,0)
+#define user_msghdr msghdr
+#endif
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(3,17,0)
+#include <linux/netdevice.h>
+
+#undef alloc_netdev
+#define alloc_netdev(sizeof_priv, name, name_assign_type, setup) \
+	alloc_netdev_mqs(sizeof_priv, name, setup, 1, 1)
+
+#include <linux/trace_seq.h>
+
+static inline unsigned char *
+trace_seq_buffer_ptr(struct trace_seq *s)
+{
+	return s->buffer + s->len;
+}
+
+#endif /* < 3.17 */
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(3,16,0)
+#define smp_mb__before_atomic()  smp_mb()
+#define smp_mb__after_atomic()   smp_mb()
+#endif /* < 3.16 */
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(3,15,0)
+#define raw_cpu_ptr(v)	__this_cpu_ptr(v)
+#endif /* < 3.15 */
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(3,14,0)
+#include <linux/pci.h>
+
+#ifdef CONFIG_PCI
+#define pci_enable_msix_range COBALT_BACKPORT(pci_enable_msix_range)
+#ifdef CONFIG_PCI_MSI
+int pci_enable_msix_range(struct pci_dev *dev,
+			  struct msix_entry *entries,
+			  int minvec, int maxvec);
+#else /* !CONFIG_PCI_MSI */
+static inline
+int pci_enable_msix_range(struct pci_dev *dev,
+			  struct msix_entry *entries,
+			  int minvec, int maxvec)
+{
+	return -ENOSYS;
+}
+#endif /* !CONFIG_PCI_MSI */
+#endif /* CONFIG_PCI */
+#endif /* < 3.14 */
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(3,13,0)
+#include <linux/dma-mapping.h>
+#include <linux/hwmon.h>
+
+#define dma_set_mask_and_coherent COBALT_BACKPORT(dma_set_mask_and_coherent)
+static inline
+int dma_set_mask_and_coherent(struct device *dev, u64 mask)
+{
+	int rc = dma_set_mask(dev, mask);
+	if (rc == 0)
+		dma_set_coherent_mask(dev, mask);
+	return rc;
+}
+
+#ifdef CONFIG_HWMON
+#define hwmon_device_register_with_groups \
+	COBALT_BACKPORT(hwmon_device_register_with_groups)
+struct device *
+hwmon_device_register_with_groups(struct device *dev, const char *name,
+				void *drvdata,
+				const struct attribute_group **groups);
+
+#define devm_hwmon_device_register_with_groups \
+	COBALT_BACKPORT(devm_hwmon_device_register_with_groups)
+struct device *
+devm_hwmon_device_register_with_groups(struct device *dev, const char *name,
+				void *drvdata,
+				const struct attribute_group **groups);
+#endif /* CONFIG_HWMON */
+
+#define reinit_completion(__x)	INIT_COMPLETION(*(__x))
+
+#endif /* < 3.13 */
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(3,11,0)
+#define DEVICE_ATTR_RW(_name)	__ATTR_RW(_name)
+#define DEVICE_ATTR_RO(_name)	__ATTR_RO(_name)
+#define DEVICE_ATTR_WO(_name)	__ATTR_WO(_name)
+#endif /* < 3.11 */
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(3,10,0)
+#error "Xenomai/cobalt requires Linux kernel 3.10 or above"
+#endif /* < 3.10 */
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(5,0,0)
+#define __kernel_timex		timex
+#endif
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(5,1,0)
+#define old_timex32		compat_timex
+#define SO_RCVTIMEO_OLD		SO_RCVTIMEO
+#define SO_SNDTIMEO_OLD		SO_SNDTIMEO
+#endif
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(5,2,0)
+#define mmiowb()		do { } while (0)
+#endif
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(5,4,0)
+#define __kernel_old_timeval	timeval
+#endif
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(5,8,0)
+#define mmap_read_lock(__mm)	down_read(&(__mm)->mmap_sem)
+#define mmap_read_unlock(__mm)	up_read(&(__mm)->mmap_sem)
+#define mmap_write_lock(__mm)	down_write(&(__mm)->mmap_sem)
+#define mmap_write_unlock(__mm)	up_write(&(__mm)->mmap_sem)
+#endif
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(5,6,0)
+#define DEFINE_PROC_OPS(__name, __open, __release, __read, __write) \
+	struct file_operations __name = {			    \
+		.open = (__open),				    \
+		.release = (__release),				    \
+		.read = (__read),				    \
+		.write = (__write),				    \
+		.llseek = seq_lseek,				    \
+}
+#else
+#define DEFINE_PROC_OPS(__name, __open, __release, __read, __write)	\
+	struct proc_ops __name = {					\
+		.proc_open = (__open),					\
+		.proc_release = (__release),				\
+		.proc_read = (__read),					\
+		.proc_write = (__write),				\
+		.proc_lseek = seq_lseek,				\
+}
+#endif
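+
+/*
+ * Illustrative usage sketch ("foo_open" and "foo_write" are made-up
+ * handlers): a single definition expands to a struct file_operations
+ * on pre-5.6 kernels and to a struct proc_ops from 5.6 onward, so
+ * the same initializer works on both sides of the API change.
+ *
+ *	static DEFINE_PROC_OPS(foo_proc_ops,
+ *			       foo_open,
+ *			       single_release,
+ *			       seq_read,
+ *			       foo_write);
+ *
+ *	proc_create("foo", 0644, NULL, &foo_proc_ops);
+ */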
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(5,8,0)
+#define vmalloc_kernel(__size, __flags)	__vmalloc(__size, GFP_KERNEL|__flags, PAGE_KERNEL)
+#else
+#define vmalloc_kernel(__size, __flags)	__vmalloc(__size, GFP_KERNEL|__flags)
+#endif
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(5,7,0)
+#define pci_aer_clear_nonfatal_status	pci_cleanup_aer_uncorrect_error_status
+#define old_timespec32    compat_timespec
+#define old_itimerspec32  compat_itimerspec
+#define old_timeval32     compat_timeval
+#endif
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(5,9,0)
+#define read_file_from_kernel(__file, __buf, __buf_size, __file_size, __id) \
+	({								\
+		loff_t ___file_size;					\
+		int __ret;						\
+		__ret = kernel_read_file(__file, __buf, &___file_size,	\
+				__buf_size, __id);			\
+		(*__file_size) = ___file_size;				\
+		__ret;							\
+	})
+#else
+#define read_file_from_kernel(__file, __buf, __buf_size, __file_size, __id) \
+	kernel_read_file(__file, 0, __buf, __buf_size, __file_size, __id)
+#endif
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(5,4,0)
+#if __has_attribute(__fallthrough__)
+# define fallthrough			__attribute__((__fallthrough__))
+#else
+# define fallthrough			do {} while (0)  /* fallthrough */
+#endif
+#endif
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(5,11,0)
+#define IRQ_WORK_INIT(_func) (struct irq_work) {	\
+	.flags = ATOMIC_INIT(0),			\
+	.func = (_func),				\
+}
+#endif
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(5,14,0)
+#define close_fd(__ufd)	__close_fd(current->files, __ufd)
+#endif
+
+#endif /* _COBALT_ASM_GENERIC_WRAPPERS_H */
+++ linux-patched/include/asm-generic/xenomai/syscall.h	2022-03-21 12:58:28.933893408 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/include/asm-generic/xenomai/pci_ids.h	1970-01-01 01:00:00.000000000 +0100
+/*
+ * Copyright (C) 2001,2002,2003,2004,2005 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * Xenomai is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published
+ * by the Free Software Foundation; either version 2 of the License,
+ * or (at your option) any later version.
+ *
+ * Xenomai is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Xenomai; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA
+ * 02111-1307, USA.
+ */
+#ifndef _COBALT_ASM_GENERIC_SYSCALL_H
+#define _COBALT_ASM_GENERIC_SYSCALL_H
+
+#include <linux/types.h>
+#include <linux/version.h>
+#include <linux/uaccess.h>
+#include <asm/xenomai/features.h>
+#include <asm/xenomai/wrappers.h>
+#include <asm/xenomai/machine.h>
+#include <cobalt/uapi/asm-generic/syscall.h>
+#include <cobalt/uapi/kernel/types.h>
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 0, 0)
+#define access_rok(addr, size)	access_ok((addr), (size))
+#define access_wok(addr, size)	access_ok((addr), (size))
+#else
+#define access_rok(addr, size)	access_ok(VERIFY_READ, (addr), (size))
+#define access_wok(addr, size)	access_ok(VERIFY_WRITE, (addr), (size))
+#endif
+
+#define __xn_copy_from_user(dstP, srcP, n)	raw_copy_from_user(dstP, srcP, n)
+#define __xn_copy_to_user(dstP, srcP, n)	raw_copy_to_user(dstP, srcP, n)
+#define __xn_put_user(src, dstP)		__put_user(src, dstP)
+#define __xn_get_user(dst, srcP)		__get_user(dst, srcP)
+#define __xn_strncpy_from_user(dstP, srcP, n)	strncpy_from_user(dstP, srcP, n)
+
+static inline int cobalt_copy_from_user(void *dst, const void __user *src,
+					size_t size)
+{
+	size_t remaining = size;
+
+	if (likely(access_rok(src, size)))
+		remaining = __xn_copy_from_user(dst, src, size);
+
+	if (unlikely(remaining > 0)) {
+		memset(dst + (size - remaining), 0, remaining);
+		return -EFAULT;
+	}
+	return 0;
+}
+
+static inline int cobalt_copy_to_user(void __user *dst, const void *src,
+				      size_t size)
+{
+	if (unlikely(!access_wok(dst, size) ||
+	    __xn_copy_to_user(dst, src, size)))
+		return -EFAULT;
+	return 0;
+}
+
+static inline int cobalt_strncpy_from_user(char *dst, const char __user *src,
+					   size_t count)
+{
+	if (unlikely(!access_rok(src, 1)))
+		return -EFAULT;
+
+	return __xn_strncpy_from_user(dst, src, count);
+}
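+
+/*
+ * Typical usage sketch from a syscall handler ("struct foo_req" and
+ * the __user pointers are made up for illustration): both copy
+ * helpers return 0 on success or -EFAULT, so the result can be
+ * propagated directly as the syscall status.
+ *
+ *	struct foo_req req;
+ *	int ret;
+ *
+ *	ret = cobalt_copy_from_user(&req, u_req, sizeof(req));
+ *	if (ret)
+ *		return ret;
+ *	...
+ *	return cobalt_copy_to_user(u_rep, &req, sizeof(req));
+ */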
+
+/*
+ * NOTE: those copy helpers won't work in compat mode: use
+ * sys32_get_*(), sys32_put_*() instead.
+ */
+
+static inline int cobalt_get_u_timespec(struct timespec64 *dst,
+			const struct __user_old_timespec __user *src)
+{
+	struct __user_old_timespec u_ts;
+	int ret;
+
+	ret = cobalt_copy_from_user(&u_ts, src, sizeof(u_ts));
+	if (ret)
+		return ret;
+
+	dst->tv_sec = u_ts.tv_sec;
+	dst->tv_nsec = u_ts.tv_nsec;
+
+	return 0;
+}
+
+static inline int cobalt_put_u_timespec(
+	struct __user_old_timespec __user *dst,
+	const struct timespec64 *src)
+{
+	struct __user_old_timespec u_ts;
+	int ret;
+
+	u_ts.tv_sec = src->tv_sec;
+	u_ts.tv_nsec = src->tv_nsec;
+
+	ret = cobalt_copy_to_user(dst, &u_ts, sizeof(*dst));
+	if (ret)
+		return ret;
+
+	return 0;
+}
+
+static inline int cobalt_get_u_itimerspec(struct itimerspec64 *dst,
+			const struct __user_old_itimerspec __user *src)
+{
+	struct __user_old_itimerspec u_its;
+	int ret;
+
+	ret = cobalt_copy_from_user(&u_its, src, sizeof(u_its));
+	if (ret)
+		return ret;
+
+	dst->it_interval.tv_sec = u_its.it_interval.tv_sec;
+	dst->it_interval.tv_nsec = u_its.it_interval.tv_nsec;
+	dst->it_value.tv_sec = u_its.it_value.tv_sec;
+	dst->it_value.tv_nsec = u_its.it_value.tv_nsec;
+
+	return 0;
+}
+
+static inline int cobalt_put_u_itimerspec(
+	struct __user_old_itimerspec __user *dst,
+	const struct itimerspec64 *src)
+{
+	struct __user_old_itimerspec u_its;
+
+	u_its.it_interval.tv_sec = src->it_interval.tv_sec;
+	u_its.it_interval.tv_nsec = src->it_interval.tv_nsec;
+	u_its.it_value.tv_sec = src->it_value.tv_sec;
+	u_its.it_value.tv_nsec = src->it_value.tv_nsec;
+
+	return cobalt_copy_to_user(dst, &u_its, sizeof(*dst));
+}
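+
+/*
+ * Usage sketch ("u_ts" being a made-up __user pointer argument):
+ * fetching an old-style user timespec into the 64bit kernel
+ * representation before handing it to the core.
+ *
+ *	struct timespec64 timeout;
+ *	int ret;
+ *
+ *	ret = cobalt_get_u_timespec(&timeout, u_ts);
+ *	if (ret)
+ *		return ret;
+ */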
+
+/* 32bit syscall emulation */
+#define __COBALT_COMPAT_BIT	0x1
+/* 32bit syscall emulation - extended form */
+#define __COBALT_COMPATX_BIT	0x2
+
+#endif /* !_COBALT_ASM_GENERIC_SYSCALL_H */
+++ linux-patched/include/asm-generic/xenomai/pci_ids.h	2022-03-21 12:58:28.930893437 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/include/asm-generic/xenomai/ipipe/thread.h	1970-01-01 01:00:00.000000000 +0100
+/*
+ * Copyright (C) 2009 Gilles Chanteperdrix <gilles.chanteperdrix@xenomai.org>.
+ *
+ * Xenomai is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published
+ * by the Free Software Foundation; either version 2 of the License,
+ * or (at your option) any later version.
+ *
+ * Xenomai is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Xenomai; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA
+ * 02111-1307, USA.
+ */
+#ifndef _COBALT_ASM_GENERIC_PCI_IDS_H
+#define _COBALT_ASM_GENERIC_PCI_IDS_H
+
+#include <linux/pci_ids.h>
+
+/* SMI */
+#ifndef PCI_DEVICE_ID_INTEL_ESB2_0
+#define PCI_DEVICE_ID_INTEL_ESB2_0 0x2670
+#endif
+#ifndef PCI_DEVICE_ID_INTEL_ICH7_0
+#define PCI_DEVICE_ID_INTEL_ICH7_0 0x27b8
+#endif
+#ifndef PCI_DEVICE_ID_INTEL_ICH7_1
+#define PCI_DEVICE_ID_INTEL_ICH7_1 0x27b9
+#endif
+#ifndef PCI_DEVICE_ID_INTEL_ICH8_4
+#define PCI_DEVICE_ID_INTEL_ICH8_4 0x2815
+#endif
+#ifndef PCI_DEVICE_ID_INTEL_ICH9_1
+#define PCI_DEVICE_ID_INTEL_ICH9_1 0x2917
+#endif
+#ifndef PCI_DEVICE_ID_INTEL_ICH9_5
+#define PCI_DEVICE_ID_INTEL_ICH9_5 0x2919
+#endif
+#ifndef PCI_DEVICE_ID_INTEL_ICH10_1
+#define PCI_DEVICE_ID_INTEL_ICH10_1 0x3a16
+#endif
+#ifndef PCI_DEVICE_ID_INTEL_PCH_LPC_MIN
+#define PCI_DEVICE_ID_INTEL_PCH_LPC_MIN 0x3b00
+#endif
+
+/* RTCAN */
+#ifndef PCI_VENDOR_ID_ESDGMBH
+#define PCI_VENDOR_ID_ESDGMBH 0x12fe
+#endif
+#ifndef PCI_DEVICE_ID_PLX_9030
+#define PCI_DEVICE_ID_PLX_9030 0x9030
+#endif
+#ifndef PCI_DEVICE_ID_PLX_9056
+#define PCI_DEVICE_ID_PLX_9056 0x9056
+#endif
+
+#endif /* _COBALT_ASM_GENERIC_PCI_IDS_H */
+++ linux-patched/include/asm-generic/xenomai/ipipe/thread.h	2022-03-21 12:58:28.926893476 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/include/asm-generic/xenomai/machine.h	1970-01-01 01:00:00.000000000 +0100
+/*
+ * Copyright (C) 2013 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * Xenomai is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * Xenomai is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Xenomai; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA
+ * 02111-1307, USA.
+ */
+#ifndef _COBALT_ASM_GENERIC_IPIPE_THREAD_H
+#define _COBALT_ASM_GENERIC_IPIPE_THREAD_H
+
+#include <asm/ptrace.h>
+#include <asm/processor.h>
+
+struct task_struct;
+
+struct xntcb {
+	struct task_struct *host_task;
+	struct thread_struct *tsp;
+	struct mm_struct *mm;
+	struct mm_struct *active_mm;
+	struct thread_struct ts;
+	struct thread_info *tip;
+#ifdef CONFIG_XENO_ARCH_FPU
+	struct task_struct *user_fpu_owner;
+#endif
+};
+
+#endif /* !_COBALT_ASM_GENERIC_IPIPE_THREAD_H */
+++ linux-patched/include/asm-generic/xenomai/machine.h	2022-03-21 12:58:28.923893505 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/include/asm-generic/xenomai/dovetail/thread.h	1970-01-01 01:00:00.000000000 +0100
+/**
+ *   Copyright &copy; 2012 Philippe Gerum.
+ *
+ *   Xenomai is free software; you can redistribute it and/or modify
+ *   it under the terms of the GNU General Public License as published by
+ *   the Free Software Foundation, Inc., 675 Mass Ave, Cambridge MA 02139,
+ *   USA; either version 2 of the License, or (at your option) any later
+ *   version.
+ *
+ *   This program is distributed in the hope that it will be useful,
+ *   but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *   GNU General Public License for more details.
+ *
+ *   You should have received a copy of the GNU General Public License
+ *   along with this program; if not, write to the Free Software
+ *   Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+#ifndef _COBALT_ASM_GENERIC_MACHINE_H
+#define _COBALT_ASM_GENERIC_MACHINE_H
+
+#include <pipeline/machine.h>
+
+#ifndef xnarch_cache_aliasing
+#define xnarch_cache_aliasing()  0
+#endif
+
+#endif /* !_COBALT_ASM_GENERIC_MACHINE_H */
+++ linux-patched/include/asm-generic/xenomai/dovetail/thread.h	2022-03-21 12:58:28.919893545 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/include/asm-generic/xenomai/syscall32.h	1970-01-01 01:00:00.000000000 +0100
+/*
+ * Copyright (C) 2021 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * Xenomai is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+#ifndef _COBALT_ASM_GENERIC_DOVETAIL_THREAD_H
+#define _COBALT_ASM_GENERIC_DOVETAIL_THREAD_H
+
+#include <linux/dovetail.h>
+
+struct xnarchtcb {
+	struct dovetail_altsched_context altsched;
+};
+
+static inline
+struct task_struct *xnarch_host_task(struct xnarchtcb *tcb)
+{
+	return tcb->altsched.task;
+}
+
+#endif /* !_COBALT_ASM_GENERIC_DOVETAIL_THREAD_H */
+++ linux-patched/include/asm-generic/xenomai/syscall32.h	2022-03-21 12:58:28.916893574 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/include/trace/events/cobalt-posix.h	1970-01-01 01:00:00.000000000 +0100
+/*
+ * Copyright (C) 2014 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * Xenomai is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published
+ * by the Free Software Foundation; either version 2 of the License,
+ * or (at your option) any later version.
+ *
+ * Xenomai is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Xenomai; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA
+ * 02111-1307, USA.
+ */
+#ifndef _COBALT_ASM_GENERIC_SYSCALL32_H
+#define _COBALT_ASM_GENERIC_SYSCALL32_H
+
+#define __COBALT_CALL32_INITHAND(__handler)
+
+#define __COBALT_CALL32_INITMODE(__mode)
+
+#define __COBALT_CALL32_ENTRY(__name, __handler)
+
+#define __COBALT_CALL_COMPAT(__reg)	0
+
+#endif /* !_COBALT_ASM_GENERIC_SYSCALL32_H */
+++ linux-patched/include/trace/events/cobalt-posix.h	2022-03-21 12:58:28.910893632 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/include/trace/events/cobalt-rtdm.h	1970-01-01 01:00:00.000000000 +0100
+/*
+ * Copyright (C) 2014 Jan Kiszka <jan.kiszka@siemens.com>.
+ * Copyright (C) 2014 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * Xenomai is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published
+ * by the Free Software Foundation; either version 2 of the License,
+ * or (at your option) any later version.
+ *
+ * Xenomai is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Xenomai; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA
+ * 02111-1307, USA.
+ */
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM cobalt_posix
+
+#if !defined(_TRACE_COBALT_POSIX_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_COBALT_POSIX_H
+
+#include <linux/tracepoint.h>
+#include <linux/trace_seq.h>
+#include <xenomai/posix/cond.h>
+#include <xenomai/posix/mqueue.h>
+#include <xenomai/posix/event.h>
+
+#define __timespec_fields(__name)				\
+	__field(time64_t, tv_sec_##__name)			\
+	__field(long, tv_nsec_##__name)
+
+#define __assign_timespec(__to, __from)				\
+	do {							\
+		__entry->tv_sec_##__to = (__from)->tv_sec;	\
+		__entry->tv_nsec_##__to = (__from)->tv_nsec;	\
+	} while (0)
+
+#define __timespec_args(__name)					\
+	(long long)__entry->tv_sec_##__name, __entry->tv_nsec_##__name
+
+#ifdef CONFIG_IA32_EMULATION
+#define __sc_compat(__name)	, { sc_cobalt_##__name + __COBALT_IA32_BASE, "compat-" #__name }
+#else
+#define __sc_compat(__name)
+#endif
+
+#define __cobalt_symbolic_syscall(__name)				\
+	{ sc_cobalt_##__name, #__name }					\
+	__sc_compat(__name)						\
+
+#define __cobalt_syscall_name(__nr)					\
+	__print_symbolic((__nr),					\
+		__cobalt_symbolic_syscall(bind),			\
+		__cobalt_symbolic_syscall(thread_create),		\
+		__cobalt_symbolic_syscall(thread_getpid),		\
+		__cobalt_symbolic_syscall(thread_setmode),		\
+		__cobalt_symbolic_syscall(thread_setname),		\
+		__cobalt_symbolic_syscall(thread_join),			\
+		__cobalt_symbolic_syscall(thread_kill),			\
+		__cobalt_symbolic_syscall(thread_setschedparam_ex),	\
+		__cobalt_symbolic_syscall(thread_getschedparam_ex),	\
+		__cobalt_symbolic_syscall(thread_setschedprio),		\
+		__cobalt_symbolic_syscall(thread_getstat),		\
+		__cobalt_symbolic_syscall(sem_init),			\
+		__cobalt_symbolic_syscall(sem_destroy),			\
+		__cobalt_symbolic_syscall(sem_post),			\
+		__cobalt_symbolic_syscall(sem_wait),			\
+		__cobalt_symbolic_syscall(sem_trywait),			\
+		__cobalt_symbolic_syscall(sem_getvalue),		\
+		__cobalt_symbolic_syscall(sem_open),			\
+		__cobalt_symbolic_syscall(sem_close),			\
+		__cobalt_symbolic_syscall(sem_unlink),			\
+		__cobalt_symbolic_syscall(sem_timedwait),		\
+		__cobalt_symbolic_syscall(sem_inquire),			\
+		__cobalt_symbolic_syscall(sem_broadcast_np),		\
+		__cobalt_symbolic_syscall(clock_getres),		\
+		__cobalt_symbolic_syscall(clock_gettime),		\
+		__cobalt_symbolic_syscall(clock_settime),		\
+		__cobalt_symbolic_syscall(clock_nanosleep),		\
+		__cobalt_symbolic_syscall(mutex_init),			\
+		__cobalt_symbolic_syscall(mutex_check_init),		\
+		__cobalt_symbolic_syscall(mutex_destroy),		\
+		__cobalt_symbolic_syscall(mutex_lock),			\
+		__cobalt_symbolic_syscall(mutex_timedlock),		\
+		__cobalt_symbolic_syscall(mutex_trylock),		\
+		__cobalt_symbolic_syscall(mutex_unlock),		\
+		__cobalt_symbolic_syscall(cond_init),			\
+		__cobalt_symbolic_syscall(cond_destroy),		\
+		__cobalt_symbolic_syscall(cond_wait_prologue),		\
+		__cobalt_symbolic_syscall(cond_wait_epilogue),		\
+		__cobalt_symbolic_syscall(mq_open),			\
+		__cobalt_symbolic_syscall(mq_close),			\
+		__cobalt_symbolic_syscall(mq_unlink),			\
+		__cobalt_symbolic_syscall(mq_getattr),			\
+		__cobalt_symbolic_syscall(mq_timedsend),		\
+		__cobalt_symbolic_syscall(mq_timedreceive),		\
+		__cobalt_symbolic_syscall(mq_notify),			\
+		__cobalt_symbolic_syscall(sched_minprio),		\
+		__cobalt_symbolic_syscall(sched_maxprio),		\
+		__cobalt_symbolic_syscall(sched_weightprio),		\
+		__cobalt_symbolic_syscall(sched_yield),			\
+		__cobalt_symbolic_syscall(sched_setscheduler_ex),	\
+		__cobalt_symbolic_syscall(sched_getscheduler_ex),	\
+		__cobalt_symbolic_syscall(sched_setconfig_np),		\
+		__cobalt_symbolic_syscall(sched_getconfig_np),		\
+		__cobalt_symbolic_syscall(timer_create),		\
+		__cobalt_symbolic_syscall(timer_delete),		\
+		__cobalt_symbolic_syscall(timer_settime),		\
+		__cobalt_symbolic_syscall(timer_gettime),		\
+		__cobalt_symbolic_syscall(timer_getoverrun),		\
+		__cobalt_symbolic_syscall(timerfd_create),		\
+		__cobalt_symbolic_syscall(timerfd_settime),		\
+		__cobalt_symbolic_syscall(timerfd_gettime),		\
+		__cobalt_symbolic_syscall(sigwait),			\
+		__cobalt_symbolic_syscall(sigwaitinfo),			\
+		__cobalt_symbolic_syscall(sigtimedwait),		\
+		__cobalt_symbolic_syscall(sigpending),			\
+		__cobalt_symbolic_syscall(kill),			\
+		__cobalt_symbolic_syscall(sigqueue),			\
+		__cobalt_symbolic_syscall(monitor_init),		\
+		__cobalt_symbolic_syscall(monitor_destroy),		\
+		__cobalt_symbolic_syscall(monitor_enter),		\
+		__cobalt_symbolic_syscall(monitor_wait),		\
+		__cobalt_symbolic_syscall(monitor_sync),		\
+		__cobalt_symbolic_syscall(monitor_exit),		\
+		__cobalt_symbolic_syscall(event_init),			\
+		__cobalt_symbolic_syscall(event_wait),			\
+		__cobalt_symbolic_syscall(event_sync),			\
+		__cobalt_symbolic_syscall(event_destroy),		\
+		__cobalt_symbolic_syscall(event_inquire),		\
+		__cobalt_symbolic_syscall(open),			\
+		__cobalt_symbolic_syscall(socket),			\
+		__cobalt_symbolic_syscall(close),			\
+		__cobalt_symbolic_syscall(ioctl),			\
+		__cobalt_symbolic_syscall(read),			\
+		__cobalt_symbolic_syscall(write),			\
+		__cobalt_symbolic_syscall(recvmsg),			\
+		__cobalt_symbolic_syscall(sendmsg),			\
+		__cobalt_symbolic_syscall(mmap),			\
+		__cobalt_symbolic_syscall(select),			\
+		__cobalt_symbolic_syscall(fcntl),			\
+		__cobalt_symbolic_syscall(migrate),			\
+		__cobalt_symbolic_syscall(archcall),			\
+		__cobalt_symbolic_syscall(trace),			\
+		__cobalt_symbolic_syscall(corectl),			\
+		__cobalt_symbolic_syscall(get_current),			\
+		__cobalt_symbolic_syscall(backtrace),			\
+		__cobalt_symbolic_syscall(serialdbg),			\
+		__cobalt_symbolic_syscall(extend),			\
+		__cobalt_symbolic_syscall(ftrace_puts),			\
+		__cobalt_symbolic_syscall(recvmmsg),			\
+		__cobalt_symbolic_syscall(sendmmsg),			\
+		__cobalt_symbolic_syscall(clock_adjtime),		\
+		__cobalt_symbolic_syscall(sem_timedwait64),		\
+		__cobalt_symbolic_syscall(clock_gettime64),		\
+		__cobalt_symbolic_syscall(clock_settime64),		\
+		__cobalt_symbolic_syscall(clock_nanosleep64),		\
+		__cobalt_symbolic_syscall(clock_getres64),		\
+		__cobalt_symbolic_syscall(clock_adjtime64),		\
+		__cobalt_symbolic_syscall(mutex_timedlock64),		\
+		__cobalt_symbolic_syscall(mq_timedsend64),  		\
+		__cobalt_symbolic_syscall(mq_timedreceive64),		\
+		__cobalt_symbolic_syscall(sigtimedwait64),		\
+		__cobalt_symbolic_syscall(monitor_wait64),		\
+		__cobalt_symbolic_syscall(event_wait64),		\
+		__cobalt_symbolic_syscall(recvmmsg64))
+
+DECLARE_EVENT_CLASS(cobalt_syscall_entry,
+	TP_PROTO(unsigned int nr),
+	TP_ARGS(nr),
+
+	TP_STRUCT__entry(
+		__field(unsigned int, nr)
+	),
+
+	TP_fast_assign(
+		__entry->nr = nr;
+	),
+
+	TP_printk("syscall=%s", __cobalt_syscall_name(__entry->nr))
+);
+
+DECLARE_EVENT_CLASS(cobalt_syscall_exit,
+	TP_PROTO(long result),
+	TP_ARGS(result),
+
+	TP_STRUCT__entry(
+		__field(long, result)
+	),
+
+	TP_fast_assign(
+		__entry->result = result;
+	),
+
+	TP_printk("result=%ld", __entry->result)
+);
+
+#define cobalt_print_sched_policy(__policy)			\
+	__print_symbolic(__policy,				\
+			 {SCHED_NORMAL, "normal"},		\
+			 {SCHED_FIFO, "fifo"},			\
+			 {SCHED_RR, "rr"},			\
+			 {SCHED_TP, "tp"},			\
+			 {SCHED_QUOTA, "quota"},		\
+			 {SCHED_SPORADIC, "sporadic"},		\
+			 {SCHED_COBALT, "cobalt"},		\
+			 {SCHED_WEAK, "weak"})
+
+const char *cobalt_trace_parse_sched_params(struct trace_seq *, int,
+					    struct sched_param_ex *);
+
+#define __parse_sched_params(policy, params)			\
+	cobalt_trace_parse_sched_params(p, policy,		\
+					(struct sched_param_ex *)(params))
+
+DECLARE_EVENT_CLASS(cobalt_posix_schedparam,
+	TP_PROTO(unsigned long pth, int policy,
+		 const struct sched_param_ex *param_ex),
+	TP_ARGS(pth, policy, param_ex),
+
+	TP_STRUCT__entry(
+		__field(unsigned long, pth)
+		__field(int, policy)
+		__dynamic_array(char, param_ex, sizeof(struct sched_param_ex))
+	),
+
+	TP_fast_assign(
+		__entry->pth = pth;
+		__entry->policy = policy;
+		memcpy(__get_dynamic_array(param_ex), param_ex, sizeof(*param_ex));
+	),
+
+	TP_printk("pth=%p policy=%s param={ %s }",
+		  (void *)__entry->pth,
+		  cobalt_print_sched_policy(__entry->policy),
+		  __parse_sched_params(__entry->policy,
+				       __get_dynamic_array(param_ex))
+	)
+);
+
+DECLARE_EVENT_CLASS(cobalt_posix_scheduler,
+	TP_PROTO(pid_t pid, int policy,
+		 const struct sched_param_ex *param_ex),
+	TP_ARGS(pid, policy, param_ex),
+
+	TP_STRUCT__entry(
+		__field(pid_t, pid)
+		__field(int, policy)
+		__dynamic_array(char, param_ex, sizeof(struct sched_param_ex))
+	),
+
+	TP_fast_assign(
+		__entry->pid = pid;
+		__entry->policy = policy;
+		memcpy(__get_dynamic_array(param_ex), param_ex, sizeof(*param_ex));
+	),
+
+	TP_printk("pid=%d policy=%s param={ %s }",
+		  __entry->pid,
+		  cobalt_print_sched_policy(__entry->policy),
+		  __parse_sched_params(__entry->policy,
+				       __get_dynamic_array(param_ex))
+	)
+);
+
+DECLARE_EVENT_CLASS(cobalt_void,
+	TP_PROTO(int dummy),
+	TP_ARGS(dummy),
+	TP_STRUCT__entry(
+		__field(int, dummy)
+	),
+	TP_fast_assign(
+		(void)dummy;
+	),
+	TP_printk("%s", "")
+);
+
+DEFINE_EVENT(cobalt_syscall_entry, cobalt_head_sysentry,
+	TP_PROTO(unsigned int nr),
+	TP_ARGS(nr)
+);
+
+DEFINE_EVENT(cobalt_syscall_exit, cobalt_head_sysexit,
+	TP_PROTO(long result),
+	TP_ARGS(result)
+);
+
+DEFINE_EVENT(cobalt_syscall_entry, cobalt_root_sysentry,
+	TP_PROTO(unsigned int nr),
+	TP_ARGS(nr)
+);
+
+DEFINE_EVENT(cobalt_syscall_exit, cobalt_root_sysexit,
+	TP_PROTO(long result),
+	TP_ARGS(result)
+);
+
+DEFINE_EVENT(cobalt_posix_schedparam, cobalt_pthread_create,
+	TP_PROTO(unsigned long pth, int policy,
+		 const struct sched_param_ex *param_ex),
+	TP_ARGS(pth, policy, param_ex)
+);
+
+DEFINE_EVENT(cobalt_posix_schedparam, cobalt_pthread_setschedparam,
+	TP_PROTO(unsigned long pth, int policy,
+		 const struct sched_param_ex *param_ex),
+	TP_ARGS(pth, policy, param_ex)
+);
+
+DEFINE_EVENT(cobalt_posix_schedparam, cobalt_pthread_getschedparam,
+	TP_PROTO(unsigned long pth, int policy,
+		 const struct sched_param_ex *param_ex),
+	TP_ARGS(pth, policy, param_ex)
+);
+
+TRACE_EVENT(cobalt_pthread_setschedprio,
+	TP_PROTO(unsigned long pth, int prio),
+	TP_ARGS(pth, prio),
+	TP_STRUCT__entry(
+		__field(unsigned long, pth)
+		__field(int, prio)
+	),
+	TP_fast_assign(
+		__entry->pth = pth;
+		__entry->prio = prio;
+	),
+	TP_printk("pth=%p prio=%d", (void *)__entry->pth, __entry->prio)
+);
+
+#define cobalt_print_thread_mode(__mode)			\
+	__print_flags(__mode, "|",				\
+		      {PTHREAD_WARNSW, "warnsw"},		\
+		      {PTHREAD_LOCK_SCHED, "lock"},		\
+		      {PTHREAD_DISABLE_LOCKBREAK, "nolockbreak"})
+
+TRACE_EVENT(cobalt_pthread_setmode,
+	TP_PROTO(int clrmask, int setmask),
+	TP_ARGS(clrmask, setmask),
+	TP_STRUCT__entry(
+		__field(int, clrmask)
+		__field(int, setmask)
+	),
+	TP_fast_assign(
+		__entry->clrmask = clrmask;
+		__entry->setmask = setmask;
+	),
+	TP_printk("clrmask=%#x(%s) setmask=%#x(%s)",
+		  __entry->clrmask, cobalt_print_thread_mode(__entry->clrmask),
+		  __entry->setmask, cobalt_print_thread_mode(__entry->setmask))
+);
+
+TRACE_EVENT(cobalt_pthread_setname,
+	TP_PROTO(unsigned long pth, const char *name),
+	TP_ARGS(pth, name),
+	TP_STRUCT__entry(
+		__field(unsigned long, pth)
+		__string(name, name)
+	),
+	TP_fast_assign(
+		__entry->pth = pth;
+		__assign_str(name, name);
+	),
+	TP_printk("pth=%p name=%s", (void *)__entry->pth, __get_str(name))
+);
+
+DECLARE_EVENT_CLASS(cobalt_posix_pid,
+	TP_PROTO(pid_t pid),
+	TP_ARGS(pid),
+	TP_STRUCT__entry(
+		__field(pid_t, pid)
+	),
+	TP_fast_assign(
+		__entry->pid = pid;
+	),
+	TP_printk("pid=%d", __entry->pid)
+);
+
+DEFINE_EVENT(cobalt_posix_pid, cobalt_pthread_stat,
+	TP_PROTO(pid_t pid),
+	TP_ARGS(pid)
+);
+
+TRACE_EVENT(cobalt_pthread_kill,
+	TP_PROTO(unsigned long pth, int sig),
+	TP_ARGS(pth, sig),
+	TP_STRUCT__entry(
+		__field(unsigned long, pth)
+		__field(int, sig)
+	),
+	TP_fast_assign(
+		__entry->pth = pth;
+		__entry->sig = sig;
+	),
+	TP_printk("pth=%p sig=%d", (void *)__entry->pth, __entry->sig)
+);
+
+TRACE_EVENT(cobalt_pthread_join,
+	TP_PROTO(unsigned long pth),
+	TP_ARGS(pth),
+	TP_STRUCT__entry(
+		__field(unsigned long, pth)
+	),
+	TP_fast_assign(
+		__entry->pth = pth;
+	),
+	TP_printk("pth=%p", (void *)__entry->pth)
+);
+
+TRACE_EVENT(cobalt_pthread_pid,
+	TP_PROTO(unsigned long pth),
+	TP_ARGS(pth),
+	TP_STRUCT__entry(
+		__field(unsigned long, pth)
+	),
+	TP_fast_assign(
+		__entry->pth = pth;
+	),
+	TP_printk("pth=%p", (void *)__entry->pth)
+);
+
+TRACE_EVENT(cobalt_pthread_extend,
+	TP_PROTO(unsigned long pth, const char *name),
+	TP_ARGS(pth, name),
+	TP_STRUCT__entry(
+		__field(unsigned long, pth)
+		__string(name, name)
+	),
+	TP_fast_assign(
+		__entry->pth = pth;
+		__assign_str(name, name);
+	),
+	TP_printk("pth=%p +personality=%s", (void *)__entry->pth, __get_str(name))
+);
+
+TRACE_EVENT(cobalt_pthread_restrict,
+	TP_PROTO(unsigned long pth, const char *name),
+	TP_ARGS(pth, name),
+	TP_STRUCT__entry(
+		__field(unsigned long, pth)
+		__string(name, name)
+	),
+	TP_fast_assign(
+		__entry->pth = pth;
+		__assign_str(name, name);
+	),
+	TP_printk("pth=%p -personality=%s", (void *)__entry->pth, __get_str(name))
+);
+
+DEFINE_EVENT(cobalt_void, cobalt_pthread_yield,
+	TP_PROTO(int dummy),
+	TP_ARGS(dummy)
+);
+
+TRACE_EVENT(cobalt_sched_setconfig,
+	TP_PROTO(int cpu, int policy, size_t len),
+	TP_ARGS(cpu, policy, len),
+	TP_STRUCT__entry(
+		__field(int, cpu)
+		__field(int, policy)
+		__field(size_t, len)
+	),
+	TP_fast_assign(
+		__entry->cpu = cpu;
+		__entry->policy = policy;
+		__entry->len = len;
+	),
+	TP_printk("cpu=%d policy=%d(%s) len=%zu",
+		  __entry->cpu, __entry->policy,
+		  cobalt_print_sched_policy(__entry->policy),
+		  __entry->len)
+);
+
+TRACE_EVENT(cobalt_sched_get_config,
+	TP_PROTO(int cpu, int policy, size_t rlen),
+	TP_ARGS(cpu, policy, rlen),
+	TP_STRUCT__entry(
+		__field(int, cpu)
+		__field(int, policy)
+		__field(ssize_t, rlen)
+	),
+	TP_fast_assign(
+		__entry->cpu = cpu;
+		__entry->policy = policy;
+		__entry->rlen = rlen;
+	),
+	TP_printk("cpu=%d policy=%d(%s) rlen=%Zd",
+		  __entry->cpu, __entry->policy,
+		  cobalt_print_sched_policy(__entry->policy),
+		  __entry->rlen)
+);
+
+DEFINE_EVENT(cobalt_posix_scheduler, cobalt_sched_setscheduler,
+	TP_PROTO(pid_t pid, int policy,
+		 const struct sched_param_ex *param_ex),
+	TP_ARGS(pid, policy, param_ex)
+);
+
+DEFINE_EVENT(cobalt_posix_pid, cobalt_sched_getscheduler,
+	TP_PROTO(pid_t pid),
+	TP_ARGS(pid)
+);
+
+DECLARE_EVENT_CLASS(cobalt_posix_prio_bound,
+	TP_PROTO(int policy, int prio),
+	TP_ARGS(policy, prio),
+	TP_STRUCT__entry(
+		__field(int, policy)
+		__field(int, prio)
+	),
+	TP_fast_assign(
+		__entry->policy = policy;
+		__entry->prio = prio;
+	),
+	TP_printk("policy=%d(%s) prio=%d",
+		  __entry->policy,
+		  cobalt_print_sched_policy(__entry->policy),
+		  __entry->prio)
+);
+
+DEFINE_EVENT(cobalt_posix_prio_bound, cobalt_sched_min_prio,
+	TP_PROTO(int policy, int prio),
+	TP_ARGS(policy, prio)
+);
+
+DEFINE_EVENT(cobalt_posix_prio_bound, cobalt_sched_max_prio,
+	TP_PROTO(int policy, int prio),
+	TP_ARGS(policy, prio)
+);
+
+DECLARE_EVENT_CLASS(cobalt_posix_sem,
+	TP_PROTO(xnhandle_t handle),
+	TP_ARGS(handle),
+	TP_STRUCT__entry(
+		__field(xnhandle_t, handle)
+	),
+	TP_fast_assign(
+		__entry->handle = handle;
+	),
+	TP_printk("sem=%#x", __entry->handle)
+);
+
+DEFINE_EVENT(cobalt_posix_sem, cobalt_psem_wait,
+	TP_PROTO(xnhandle_t handle),
+	TP_ARGS(handle)
+);
+
+DEFINE_EVENT(cobalt_posix_sem, cobalt_psem_trywait,
+	TP_PROTO(xnhandle_t handle),
+	TP_ARGS(handle)
+);
+
+DEFINE_EVENT(cobalt_posix_sem, cobalt_psem_timedwait,
+	TP_PROTO(xnhandle_t handle),
+	TP_ARGS(handle)
+);
+
+DEFINE_EVENT(cobalt_posix_sem, cobalt_psem_post,
+	TP_PROTO(xnhandle_t handle),
+	TP_ARGS(handle)
+);
+
+DEFINE_EVENT(cobalt_posix_sem, cobalt_psem_destroy,
+	TP_PROTO(xnhandle_t handle),
+	TP_ARGS(handle)
+);
+
+DEFINE_EVENT(cobalt_posix_sem, cobalt_psem_broadcast,
+	TP_PROTO(xnhandle_t handle),
+	TP_ARGS(handle)
+);
+
+DEFINE_EVENT(cobalt_posix_sem, cobalt_psem_inquire,
+	TP_PROTO(xnhandle_t handle),
+	TP_ARGS(handle)
+);
+
+TRACE_EVENT(cobalt_psem_getvalue,
+	TP_PROTO(xnhandle_t handle, int value),
+	TP_ARGS(handle, value),
+	TP_STRUCT__entry(
+		__field(xnhandle_t, handle)
+		__field(int, value)
+	),
+	TP_fast_assign(
+		__entry->handle = handle;
+		__entry->value = value;
+	),
+	TP_printk("sem=%#x value=%d", __entry->handle, __entry->value)
+);
+
+#define cobalt_print_sem_flags(__flags)				\
+  	__print_flags(__flags, "|",				\
+			 {SEM_FIFO, "fifo"},			\
+			 {SEM_PULSE, "pulse"},			\
+			 {SEM_PSHARED, "pshared"},		\
+			 {SEM_REPORT, "report"},		\
+			 {SEM_WARNDEL, "warndel"},		\
+			 {SEM_RAWCLOCK, "rawclock"},		\
+			 {SEM_NOBUSYDEL, "nobusydel"})
+
+TRACE_EVENT(cobalt_psem_init,
+	TP_PROTO(const char *name, xnhandle_t handle,
+		 int flags, unsigned int value),
+	TP_ARGS(name, handle, flags, value),
+	TP_STRUCT__entry(
+		__string(name, name)
+		__field(xnhandle_t, handle)
+		__field(int, flags)
+		__field(unsigned int, value)
+	),
+	TP_fast_assign(
+		__assign_str(name, name);
+		__entry->handle = handle;
+		__entry->flags = flags;
+		__entry->value = value;
+	),
+	TP_printk("sem=%#x(%s) flags=%#x(%s) value=%u",
+		  __entry->handle,
+		  __get_str(name),
+		  __entry->flags,
+		  cobalt_print_sem_flags(__entry->flags),
+		  __entry->value)
+);
+
+TRACE_EVENT(cobalt_psem_init_failed,
+	TP_PROTO(const char *name, int flags, unsigned int value, int status),
+	TP_ARGS(name, flags, value, status),
+	TP_STRUCT__entry(
+		__string(name, name)
+		__field(int, flags)
+		__field(unsigned int, value)
+		__field(int, status)
+	),
+	TP_fast_assign(
+		__assign_str(name, name);
+		__entry->flags = flags;
+		__entry->value = value;
+		__entry->status = status;
+	),
+	TP_printk("name=%s flags=%#x(%s) value=%u error=%d",
+		  __get_str(name),
+		  __entry->flags,
+		  cobalt_print_sem_flags(__entry->flags),
+		  __entry->value, __entry->status)
+);
+
+#define cobalt_print_oflags(__flags)		\
+	__print_flags(__flags,  "|", 		\
+		      {O_RDONLY, "rdonly"},	\
+		      {O_WRONLY, "wronly"},	\
+		      {O_RDWR, "rdwr"},		\
+		      {O_CREAT, "creat"},	\
+		      {O_EXCL, "excl"},		\
+		      {O_DIRECT, "direct"},	\
+		      {O_NONBLOCK, "nonblock"},	\
+		      {O_TRUNC, "trunc"})
+
+TRACE_EVENT(cobalt_psem_open,
+	TP_PROTO(const char *name, xnhandle_t handle,
+		 int oflags, mode_t mode, unsigned int value),
+	TP_ARGS(name, handle, oflags, mode, value),
+	TP_STRUCT__entry(
+		__string(name, name)
+		__field(xnhandle_t, handle)
+		__field(int, oflags)
+		__field(mode_t, mode)
+		__field(unsigned int, value)
+	),
+	TP_fast_assign(
+		__assign_str(name, name);
+		__entry->handle = handle;
+		__entry->oflags = oflags;
+		if (oflags & O_CREAT) {
+			__entry->mode = mode;
+			__entry->value = value;
+		} else {
+			__entry->mode = 0;
+			__entry->value = 0;
+		}
+	),
+	TP_printk("named_sem=%#x=(%s) oflags=%#x(%s) mode=%o value=%u",
+		  __entry->handle, __get_str(name),
+		  __entry->oflags, cobalt_print_oflags(__entry->oflags),
+		  __entry->mode, __entry->value)
+);
+
+TRACE_EVENT(cobalt_psem_open_failed,
+	TP_PROTO(const char *name, int oflags, mode_t mode,
+		 unsigned int value, int status),
+	TP_ARGS(name, oflags, mode, value, status),
+	TP_STRUCT__entry(
+		__string(name, name)
+		__field(int, oflags)
+		__field(mode_t, mode)
+		__field(unsigned int, value)
+		__field(int, status)
+	),
+	TP_fast_assign(
+		__assign_str(name, name);
+		__entry->oflags = oflags;
+		__entry->status = status;
+		if (oflags & O_CREAT) {
+			__entry->mode = mode;
+			__entry->value = value;
+		} else {
+			__entry->mode = 0;
+			__entry->value = 0;
+		}
+	),
+	TP_printk("named_sem=%s oflags=%#x(%s) mode=%o value=%u error=%d",
+		  __get_str(name),
+		  __entry->oflags, cobalt_print_oflags(__entry->oflags),
+		  __entry->mode, __entry->value, __entry->status)
+);
+
+DEFINE_EVENT(cobalt_posix_sem, cobalt_psem_close,
+	TP_PROTO(xnhandle_t handle),
+	TP_ARGS(handle)
+);
+
+TRACE_EVENT(cobalt_psem_unlink,
+	TP_PROTO(const char *name),
+	TP_ARGS(name),
+	TP_STRUCT__entry(
+		__string(name, name)
+	),
+	TP_fast_assign(
+		__assign_str(name, name);
+	),
+	TP_printk("name=%s", __get_str(name))
+);
+
+DECLARE_EVENT_CLASS(cobalt_clock_timespec,
+	TP_PROTO(clockid_t clk_id, const struct timespec64 *val),
+	TP_ARGS(clk_id, val),
+
+	TP_STRUCT__entry(
+		__field(clockid_t, clk_id)
+		__timespec_fields(val)
+	),
+
+	TP_fast_assign(
+		__entry->clk_id = clk_id;
+		__assign_timespec(val, val);
+	),
+
+	TP_printk("clock_id=%d timeval=(%lld.%09ld)",
+		  __entry->clk_id,
+		  __timespec_args(val)
+	)
+);
+
+DEFINE_EVENT(cobalt_clock_timespec, cobalt_clock_getres,
+	TP_PROTO(clockid_t clk_id, const struct timespec64 *res),
+	TP_ARGS(clk_id, res)
+);
+
+DEFINE_EVENT(cobalt_clock_timespec, cobalt_clock_gettime,
+	TP_PROTO(clockid_t clk_id, const struct timespec64 *time),
+	TP_ARGS(clk_id, time)
+);
+
+DEFINE_EVENT(cobalt_clock_timespec, cobalt_clock_settime,
+	TP_PROTO(clockid_t clk_id, const struct timespec64 *time),
+	TP_ARGS(clk_id, time)
+);
+
+TRACE_EVENT(cobalt_clock_adjtime,
+	TP_PROTO(clockid_t clk_id, struct __kernel_timex *tx),
+	TP_ARGS(clk_id, tx),
+
+	TP_STRUCT__entry(
+		__field(clockid_t, clk_id)
+		__field(struct __kernel_timex *, tx)
+	),
+
+	TP_fast_assign(
+		__entry->clk_id = clk_id;
+		__entry->tx = tx;
+	),
+
+	TP_printk("clock_id=%d timex=%p",
+		  __entry->clk_id,
+		  __entry->tx
+	)
+);
+
+#define cobalt_print_timer_flags(__flags)			\
+	__print_flags(__flags, "|",				\
+		      {TIMER_ABSTIME, "TIMER_ABSTIME"})
+
+TRACE_EVENT(cobalt_clock_nanosleep,
+	TP_PROTO(clockid_t clk_id, int flags, const struct timespec64 *time),
+	TP_ARGS(clk_id, flags, time),
+
+	TP_STRUCT__entry(
+		__field(clockid_t, clk_id)
+		__field(int, flags)
+		__timespec_fields(time)
+	),
+
+	TP_fast_assign(
+		__entry->clk_id = clk_id;
+		__entry->flags = flags;
+		__assign_timespec(time, time);
+	),
+
+	TP_printk("clock_id=%d flags=%#x(%s) rqt=(%lld.%09ld)",
+		  __entry->clk_id,
+		  __entry->flags, cobalt_print_timer_flags(__entry->flags),
+		  __timespec_args(time)
+	)
+);
+
+DECLARE_EVENT_CLASS(cobalt_clock_ident,
+	TP_PROTO(const char *name, clockid_t clk_id),
+	TP_ARGS(name, clk_id),
+	TP_STRUCT__entry(
+		__string(name, name)
+		__field(clockid_t, clk_id)
+	),
+	TP_fast_assign(
+		__assign_str(name, name);
+		__entry->clk_id = clk_id;
+	),
+	TP_printk("name=%s, id=%#x", __get_str(name), __entry->clk_id)
+);
+
+DEFINE_EVENT(cobalt_clock_ident, cobalt_clock_register,
+	TP_PROTO(const char *name, clockid_t clk_id),
+	TP_ARGS(name, clk_id)
+);
+
+DEFINE_EVENT(cobalt_clock_ident, cobalt_clock_deregister,
+	TP_PROTO(const char *name, clockid_t clk_id),
+	TP_ARGS(name, clk_id)
+);
+
+#define cobalt_print_clock(__clk_id)					\
+	__print_symbolic(__clk_id,					\
+			 {CLOCK_MONOTONIC, "CLOCK_MONOTONIC"},		\
+			 {CLOCK_MONOTONIC_RAW, "CLOCK_MONOTONIC_RAW"},	\
+			 {CLOCK_REALTIME, "CLOCK_REALTIME"})
+
+TRACE_EVENT(cobalt_cond_init,
+	TP_PROTO(const struct cobalt_cond_shadow __user *u_cnd,
+		 const struct cobalt_condattr *attr),
+	TP_ARGS(u_cnd, attr),
+	TP_STRUCT__entry(
+		__field(const struct cobalt_cond_shadow __user *, u_cnd)
+		__field(clockid_t, clk_id)
+		__field(int, pshared)
+	),
+	TP_fast_assign(
+		__entry->u_cnd = u_cnd;
+		__entry->clk_id = attr->clock;
+		__entry->pshared = attr->pshared;
+	),
+	TP_printk("cond=%p attr={ .clock=%s, .pshared=%d }",
+		  __entry->u_cnd,
+		  cobalt_print_clock(__entry->clk_id),
+		  __entry->pshared)
+);
+
+TRACE_EVENT(cobalt_cond_destroy,
+	TP_PROTO(const struct cobalt_cond_shadow __user *u_cnd),
+	TP_ARGS(u_cnd),
+	TP_STRUCT__entry(
+		__field(const struct cobalt_cond_shadow __user *, u_cnd)
+	),
+	TP_fast_assign(
+		__entry->u_cnd = u_cnd;
+	),
+	TP_printk("cond=%p", __entry->u_cnd)
+);
+
+TRACE_EVENT(cobalt_cond_timedwait,
+	TP_PROTO(const struct cobalt_cond_shadow __user *u_cnd,
+		 const struct cobalt_mutex_shadow __user *u_mx,
+		 const struct timespec64 *timeout),
+	TP_ARGS(u_cnd, u_mx, timeout),
+	TP_STRUCT__entry(
+		__field(const struct cobalt_cond_shadow __user *, u_cnd)
+		__field(const struct cobalt_mutex_shadow __user *, u_mx)
+		__timespec_fields(timeout)
+	),
+	TP_fast_assign(
+		__entry->u_cnd = u_cnd;
+		__entry->u_mx = u_mx;
+		__assign_timespec(timeout, timeout);
+	),
+	TP_printk("cond=%p, mutex=%p, timeout=(%lld.%09ld)",
+		  __entry->u_cnd, __entry->u_mx, __timespec_args(timeout))
+);
+
+TRACE_EVENT(cobalt_cond_wait,
+	TP_PROTO(const struct cobalt_cond_shadow __user *u_cnd,
+		 const struct cobalt_mutex_shadow __user *u_mx),
+	TP_ARGS(u_cnd, u_mx),
+	TP_STRUCT__entry(
+		__field(const struct cobalt_cond_shadow __user *, u_cnd)
+		__field(const struct cobalt_mutex_shadow __user *, u_mx)
+	),
+	TP_fast_assign(
+		__entry->u_cnd = u_cnd;
+		__entry->u_mx = u_mx;
+	),
+	TP_printk("cond=%p, mutex=%p",
+		  __entry->u_cnd, __entry->u_mx)
+);
+
+TRACE_EVENT(cobalt_mq_open,
+	TP_PROTO(const char *name, int oflags, mode_t mode),
+	TP_ARGS(name, oflags, mode),
+
+	TP_STRUCT__entry(
+		__string(name, name)
+		__field(int, oflags)
+		__field(mode_t, mode)
+	),
+
+	TP_fast_assign(
+		__assign_str(name, name);
+		__entry->oflags = oflags;
+		__entry->mode = (oflags & O_CREAT) ? mode : 0;
+	),
+
+	TP_printk("name=%s oflags=%#x(%s) mode=%o",
+		  __get_str(name),
+		  __entry->oflags, cobalt_print_oflags(__entry->oflags),
+		  __entry->mode)
+);
+
+TRACE_EVENT(cobalt_mq_notify,
+	TP_PROTO(mqd_t mqd, const struct sigevent *sev),
+	TP_ARGS(mqd, sev),
+
+	TP_STRUCT__entry(
+		__field(mqd_t, mqd)
+		__field(int, signo)
+	),
+
+	TP_fast_assign(
+		__entry->mqd = mqd;
+		__entry->signo = sev && sev->sigev_notify != SIGEV_NONE ?
+			sev->sigev_signo : 0;
+	),
+
+	TP_printk("mqd=%d signo=%d",
+		  __entry->mqd, __entry->signo)
+);
+
+TRACE_EVENT(cobalt_mq_close,
+	TP_PROTO(mqd_t mqd),
+	TP_ARGS(mqd),
+
+	TP_STRUCT__entry(
+		__field(mqd_t, mqd)
+	),
+
+	TP_fast_assign(
+		__entry->mqd = mqd;
+	),
+
+	TP_printk("mqd=%d", __entry->mqd)
+);
+
+TRACE_EVENT(cobalt_mq_unlink,
+	TP_PROTO(const char *name),
+	TP_ARGS(name),
+
+	TP_STRUCT__entry(
+		__string(name, name)
+	),
+
+	TP_fast_assign(
+		__assign_str(name, name);
+	),
+
+	TP_printk("name=%s", __get_str(name))
+);
+
+TRACE_EVENT(cobalt_mq_send,
+	TP_PROTO(mqd_t mqd, const void __user *u_buf, size_t len,
+		 unsigned int prio),
+	TP_ARGS(mqd, u_buf, len, prio),
+	TP_STRUCT__entry(
+		__field(mqd_t, mqd)
+		__field(const void __user *, u_buf)
+		__field(size_t, len)
+		__field(unsigned int, prio)
+	),
+	TP_fast_assign(
+		__entry->mqd = mqd;
+		__entry->u_buf = u_buf;
+		__entry->len = len;
+		__entry->prio = prio;
+	),
+	TP_printk("mqd=%d buf=%p len=%zu prio=%u",
+		  __entry->mqd, __entry->u_buf, __entry->len,
+		  __entry->prio)
+);
+
+TRACE_EVENT(cobalt_mq_timedreceive,
+	TP_PROTO(mqd_t mqd, const void __user *u_buf, size_t len,
+		 const struct timespec64 *timeout),
+	TP_ARGS(mqd, u_buf, len, timeout),
+	TP_STRUCT__entry(
+		__field(mqd_t, mqd)
+		__field(const void __user *, u_buf)
+		__field(size_t, len)
+		__timespec_fields(timeout)
+	),
+	TP_fast_assign(
+		__entry->mqd = mqd;
+		__entry->u_buf = u_buf;
+		__entry->len = len;
+		__assign_timespec(timeout, timeout);
+	),
+	TP_printk("mqd=%d buf=%p len=%zu timeout=(%lld.%09ld)",
+		  __entry->mqd, __entry->u_buf, __entry->len,
+		  __timespec_args(timeout))
+);
+
+TRACE_EVENT(cobalt_mq_receive,
+	TP_PROTO(mqd_t mqd, const void __user *u_buf, size_t len),
+	TP_ARGS(mqd, u_buf, len),
+	TP_STRUCT__entry(
+		__field(mqd_t, mqd)
+		__field(const void __user *, u_buf)
+		__field(size_t, len)
+	),
+	TP_fast_assign(
+		__entry->mqd = mqd;
+		__entry->u_buf = u_buf;
+		__entry->len = len;
+	),
+	TP_printk("mqd=%d buf=%p len=%zu",
+		  __entry->mqd, __entry->u_buf, __entry->len)
+);
+
+DECLARE_EVENT_CLASS(cobalt_posix_mqattr,
+	TP_PROTO(mqd_t mqd, const struct mq_attr *attr),
+	TP_ARGS(mqd, attr),
+	TP_STRUCT__entry(
+		__field(mqd_t, mqd)
+		__field(long, flags)
+		__field(long, curmsgs)
+		__field(long, msgsize)
+		__field(long, maxmsg)
+	),
+	TP_fast_assign(
+		__entry->mqd = mqd;
+		__entry->flags = attr->mq_flags;
+		__entry->curmsgs = attr->mq_curmsgs;
+		__entry->msgsize = attr->mq_msgsize;
+		__entry->maxmsg = attr->mq_maxmsg;
+	),
+	TP_printk("mqd=%d flags=%#lx(%s) curmsgs=%ld msgsize=%ld maxmsg=%ld",
+		  __entry->mqd,
+		  __entry->flags, cobalt_print_oflags(__entry->flags),
+		  __entry->curmsgs,
+		  __entry->msgsize,
+		  __entry->maxmsg
+	)
+);
+
+DEFINE_EVENT(cobalt_posix_mqattr, cobalt_mq_getattr,
+	TP_PROTO(mqd_t mqd, const struct mq_attr *attr),
+	TP_ARGS(mqd, attr)
+);
+
+DEFINE_EVENT(cobalt_posix_mqattr, cobalt_mq_setattr,
+	TP_PROTO(mqd_t mqd, const struct mq_attr *attr),
+	TP_ARGS(mqd, attr)
+);
+
+#define cobalt_print_evflags(__flags)			\
+	__print_flags(__flags,  "|",			\
+		      {COBALT_EVENT_SHARED, "shared"},	\
+		      {COBALT_EVENT_PRIO, "prio"})
+
+TRACE_EVENT(cobalt_event_init,
+	TP_PROTO(const struct cobalt_event_shadow __user *u_event,
+		 unsigned long value, int flags),
+	TP_ARGS(u_event, value, flags),
+	TP_STRUCT__entry(
+		__field(const struct cobalt_event_shadow __user *, u_event)
+		__field(unsigned long, value)
+		__field(int, flags)
+	),
+	TP_fast_assign(
+		__entry->u_event = u_event;
+		__entry->value = value;
+		__entry->flags = flags;
+	),
+	TP_printk("event=%p value=%lu flags=%#x(%s)",
+		  __entry->u_event, __entry->value,
+		  __entry->flags, cobalt_print_evflags(__entry->flags))
+);
+
+#define cobalt_print_evmode(__mode)			\
+	__print_symbolic(__mode,			\
+			 {COBALT_EVENT_ANY, "any"},	\
+			 {COBALT_EVENT_ALL, "all"})
+
+TRACE_EVENT(cobalt_event_timedwait,
+	TP_PROTO(const struct cobalt_event_shadow __user *u_event,
+		 unsigned long bits, int mode,
+		 const struct timespec64 *timeout),
+	TP_ARGS(u_event, bits, mode, timeout),
+	TP_STRUCT__entry(
+		__field(const struct cobalt_event_shadow __user *, u_event)
+		__field(unsigned long, bits)
+		__field(int, mode)
+		__timespec_fields(timeout)
+	),
+	TP_fast_assign(
+		__entry->u_event = u_event;
+		__entry->bits = bits;
+		__entry->mode = mode;
+		__assign_timespec(timeout, timeout);
+	),
+	TP_printk("event=%p bits=%#lx mode=%#x(%s) timeout=(%lld.%09ld)",
+		  __entry->u_event, __entry->bits, __entry->mode,
+		  cobalt_print_evmode(__entry->mode),
+		  __timespec_args(timeout))
+);
+
+TRACE_EVENT(cobalt_event_wait,
+	TP_PROTO(const struct cobalt_event_shadow __user *u_event,
+		 unsigned long bits, int mode),
+	TP_ARGS(u_event, bits, mode),
+	TP_STRUCT__entry(
+		__field(const struct cobalt_event_shadow __user *, u_event)
+		__field(unsigned long, bits)
+		__field(int, mode)
+	),
+	TP_fast_assign(
+		__entry->u_event = u_event;
+		__entry->bits = bits;
+		__entry->mode = mode;
+	),
+	TP_printk("event=%p bits=%#lx mode=%#x(%s)",
+		  __entry->u_event, __entry->bits, __entry->mode,
+		  cobalt_print_evmode(__entry->mode))
+);
+
+DECLARE_EVENT_CLASS(cobalt_event_ident,
+	TP_PROTO(const struct cobalt_event_shadow __user *u_event),
+	TP_ARGS(u_event),
+	TP_STRUCT__entry(
+		__field(const struct cobalt_event_shadow __user *, u_event)
+	),
+	TP_fast_assign(
+		__entry->u_event = u_event;
+	),
+	TP_printk("event=%p", __entry->u_event)
+);
+
+DEFINE_EVENT(cobalt_event_ident, cobalt_event_destroy,
+	TP_PROTO(const struct cobalt_event_shadow __user *u_event),
+	TP_ARGS(u_event)
+);
+
+DEFINE_EVENT(cobalt_event_ident, cobalt_event_sync,
+	TP_PROTO(const struct cobalt_event_shadow __user *u_event),
+	TP_ARGS(u_event)
+);
+
+DEFINE_EVENT(cobalt_event_ident, cobalt_event_inquire,
+	TP_PROTO(const struct cobalt_event_shadow __user *u_event),
+	TP_ARGS(u_event)
+);
+
+#endif /* _TRACE_COBALT_POSIX_H */
+
+/* This part must be outside protection */
+#undef TRACE_INCLUDE_PATH
+#undef TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_FILE cobalt-posix
+#include <trace/define_trace.h>
+++ linux-patched/include/trace/events/cobalt-rtdm.h	2022-03-21 12:58:28.904893691 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/include/trace/events/cobalt-core.h	1970-01-01 01:00:00.000000000 +0100
+/*
+ * Copyright (C) 2014 Jan Kiszka <jan.kiszka@siemens.com>.
+ * Copyright (C) 2014 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * Xenomai is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published
+ * by the Free Software Foundation; either version 2 of the License,
+ * or (at your option) any later version.
+ *
+ * Xenomai is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Xenomai; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA
+ * 02111-1307, USA.
+ */
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM cobalt_rtdm
+
+#if !defined(_TRACE_COBALT_RTDM_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_COBALT_RTDM_H
+
+#include <linux/tracepoint.h>
+#include <linux/mman.h>
+#include <linux/sched.h>
+
+struct rtdm_fd;
+struct rtdm_event;
+struct rtdm_sem;
+struct rtdm_mutex;
+struct xnthread;
+struct rtdm_device;
+struct rtdm_dev_context;
+struct _rtdm_mmap_request;
+
+DECLARE_EVENT_CLASS(fd_event,
+	TP_PROTO(struct rtdm_fd *fd, int ufd),
+	TP_ARGS(fd, ufd),
+
+	TP_STRUCT__entry(
+		__field(struct rtdm_device *, dev)
+		__field(int, ufd)
+	),
+
+	TP_fast_assign(
+		__entry->dev = rtdm_fd_to_context(fd)->device;
+		__entry->ufd = ufd;
+	),
+
+	TP_printk("device=%p fd=%d",
+		  __entry->dev, __entry->ufd)
+);
+
+DECLARE_EVENT_CLASS(fd_request,
+	TP_PROTO(struct task_struct *task,
+		 struct rtdm_fd *fd, int ufd, unsigned long arg),
+	TP_ARGS(task, fd, ufd, arg),
+
+	TP_STRUCT__entry(
+		__array(char, comm, TASK_COMM_LEN)
+		__field(pid_t, pid)
+		__field(struct rtdm_device *, dev)
+		__field(int, ufd)
+		__field(unsigned long, arg)
+	),
+
+	TP_fast_assign(
+		memcpy(__entry->comm, task->comm, TASK_COMM_LEN);
+		__entry->pid = task_pid_nr(task);
+		__entry->dev = rtdm_fd_to_context(fd)->device;
+		__entry->ufd = ufd;
+		__entry->arg = arg;
+	),
+
+	TP_printk("device=%p fd=%d arg=%#lx pid=%d comm=%s",
+		  __entry->dev, __entry->ufd, __entry->arg,
+		  __entry->pid, __entry->comm)
+);
+
+DECLARE_EVENT_CLASS(fd_request_status,
+	TP_PROTO(struct task_struct *task,
+		 struct rtdm_fd *fd, int ufd, int status),
+	TP_ARGS(task, fd, ufd, status),
+
+	TP_STRUCT__entry(
+		__array(char, comm, TASK_COMM_LEN)
+		__field(pid_t, pid)
+		__field(struct rtdm_device *, dev)
+		__field(int, ufd)
+	),
+
+	TP_fast_assign(
+		memcpy(__entry->comm, task->comm, TASK_COMM_LEN);
+		__entry->pid = task_pid_nr(task);
+		__entry->dev =
+			!IS_ERR(fd) ? rtdm_fd_to_context(fd)->device : NULL;
+		__entry->ufd = ufd;
+	),
+
+	TP_printk("device=%p fd=%d pid=%d comm=%s",
+		  __entry->dev, __entry->ufd, __entry->pid, __entry->comm)
+);
+
+DECLARE_EVENT_CLASS(task_op,
+	TP_PROTO(struct xnthread *task),
+	TP_ARGS(task),
+
+	TP_STRUCT__entry(
+		__field(struct xnthread *, task)
+		__string(task_name, task->name)
+	),
+
+	TP_fast_assign(
+		__entry->task = task;
+		__assign_str(task_name, task->name);
+	),
+
+	TP_printk("task %p(%s)", __entry->task, __get_str(task_name))
+);
+
+DECLARE_EVENT_CLASS(event_op,
+	TP_PROTO(struct rtdm_event *ev),
+	TP_ARGS(ev),
+
+	TP_STRUCT__entry(
+		__field(struct rtdm_event *, ev)
+	),
+
+	TP_fast_assign(
+		__entry->ev = ev;
+	),
+
+	TP_printk("event=%p", __entry->ev)
+);
+
+DECLARE_EVENT_CLASS(sem_op,
+	TP_PROTO(struct rtdm_sem *sem),
+	TP_ARGS(sem),
+
+	TP_STRUCT__entry(
+		__field(struct rtdm_sem *, sem)
+	),
+
+	TP_fast_assign(
+		__entry->sem = sem;
+	),
+
+	TP_printk("sem=%p", __entry->sem)
+);
+
+DECLARE_EVENT_CLASS(mutex_op,
+	TP_PROTO(struct rtdm_mutex *mutex),
+	TP_ARGS(mutex),
+
+	TP_STRUCT__entry(
+		__field(struct rtdm_mutex *, mutex)
+	),
+
+	TP_fast_assign(
+		__entry->mutex = mutex;
+	),
+
+	TP_printk("mutex=%p", __entry->mutex)
+);
+
+TRACE_EVENT(cobalt_device_register,
+	TP_PROTO(struct rtdm_device *dev),
+	TP_ARGS(dev),
+
+	TP_STRUCT__entry(
+		__field(struct rtdm_device *, dev)
+		__string(device_name, dev->name)
+		__field(int, flags)
+		__field(int, class_id)
+		__field(int, subclass_id)
+		__field(int, profile_version)
+	),
+
+	TP_fast_assign(
+		__entry->dev	= dev;
+		__assign_str(device_name, dev->name);
+		__entry->flags = dev->driver->device_flags;
+		__entry->class_id = dev->driver->profile_info.class_id;
+		__entry->subclass_id = dev->driver->profile_info.subclass_id;
+		__entry->profile_version = dev->driver->profile_info.version;
+	),
+
+	TP_printk("%s device %s=%p flags=0x%x, class=%d.%d profile=%d",
+		  (__entry->flags & RTDM_DEVICE_TYPE_MASK)
+		  == RTDM_NAMED_DEVICE ? "named" : "protocol",
+		  __get_str(device_name), __entry->dev,
+		  __entry->flags, __entry->class_id, __entry->subclass_id,
+		  __entry->profile_version)
+);
+
+TRACE_EVENT(cobalt_device_unregister,
+	TP_PROTO(struct rtdm_device *dev),
+	TP_ARGS(dev),
+
+	TP_STRUCT__entry(
+		__field(struct rtdm_device *, dev)
+		__string(device_name, dev->name)
+	),
+
+	TP_fast_assign(
+		__entry->dev	= dev;
+		__assign_str(device_name, dev->name);
+	),
+
+	TP_printk("device %s=%p",
+		  __get_str(device_name), __entry->dev)
+);
+
+DEFINE_EVENT(fd_event, cobalt_fd_created,
+	TP_PROTO(struct rtdm_fd *fd, int ufd),
+	TP_ARGS(fd, ufd)
+);
+
+DEFINE_EVENT(fd_request, cobalt_fd_open,
+	TP_PROTO(struct task_struct *task,
+		 struct rtdm_fd *fd, int ufd,
+		 unsigned long oflags),
+	TP_ARGS(task, fd, ufd, oflags)
+);
+
+DEFINE_EVENT(fd_request, cobalt_fd_close,
+	TP_PROTO(struct task_struct *task,
+		 struct rtdm_fd *fd, int ufd,
+		 unsigned long lock_count),
+	TP_ARGS(task, fd, ufd, lock_count)
+);
+
+DEFINE_EVENT(fd_request, cobalt_fd_socket,
+	TP_PROTO(struct task_struct *task,
+		 struct rtdm_fd *fd, int ufd,
+		 unsigned long protocol_family),
+	TP_ARGS(task, fd, ufd, protocol_family)
+);
+
+DEFINE_EVENT(fd_request, cobalt_fd_read,
+	TP_PROTO(struct task_struct *task,
+		 struct rtdm_fd *fd, int ufd,
+		 unsigned long len),
+	TP_ARGS(task, fd, ufd, len)
+);
+
+DEFINE_EVENT(fd_request, cobalt_fd_write,
+	TP_PROTO(struct task_struct *task,
+		 struct rtdm_fd *fd, int ufd,
+		 unsigned long len),
+	TP_ARGS(task, fd, ufd, len)
+);
+
+DEFINE_EVENT(fd_request, cobalt_fd_ioctl,
+	TP_PROTO(struct task_struct *task,
+		 struct rtdm_fd *fd, int ufd,
+		 unsigned long request),
+	TP_ARGS(task, fd, ufd, request)
+);
+
+DEFINE_EVENT(fd_request, cobalt_fd_sendmsg,
+	TP_PROTO(struct task_struct *task,
+		 struct rtdm_fd *fd, int ufd,
+		 unsigned long flags),
+	TP_ARGS(task, fd, ufd, flags)
+);
+
+DEFINE_EVENT(fd_request, cobalt_fd_sendmmsg,
+	TP_PROTO(struct task_struct *task,
+		 struct rtdm_fd *fd, int ufd,
+		 unsigned long flags),
+	TP_ARGS(task, fd, ufd, flags)
+);
+
+DEFINE_EVENT(fd_request, cobalt_fd_recvmsg,
+	TP_PROTO(struct task_struct *task,
+		 struct rtdm_fd *fd, int ufd,
+		 unsigned long flags),
+	TP_ARGS(task, fd, ufd, flags)
+);
+
+DEFINE_EVENT(fd_request, cobalt_fd_recvmmsg,
+	TP_PROTO(struct task_struct *task,
+		 struct rtdm_fd *fd, int ufd,
+		 unsigned long flags),
+	TP_ARGS(task, fd, ufd, flags)
+);
+
+#define cobalt_print_protbits(__prot)		\
+	__print_flags(__prot,  "|", 		\
+		      {PROT_EXEC, "exec"},	\
+		      {PROT_READ, "read"},	\
+		      {PROT_WRITE, "write"})
+
+#define cobalt_print_mapbits(__flags)		\
+	__print_flags(__flags,  "|", 		\
+		      {MAP_SHARED, "shared"},	\
+		      {MAP_PRIVATE, "private"},	\
+		      {MAP_ANONYMOUS, "anon"},	\
+		      {MAP_FIXED, "fixed"},	\
+		      {MAP_HUGETLB, "huge"},	\
+		      {MAP_NONBLOCK, "nonblock"},	\
+		      {MAP_NORESERVE, "noreserve"},	\
+		      {MAP_POPULATE, "populate"},	\
+		      {MAP_UNINITIALIZED, "uninit"})
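+/*
+ * The helpers above make the trace output human-readable:
+ * __print_flags() decodes the raw prot/flags masks, so e.g. prot=0x3
+ * is rendered as "read|write" and flags=0x1 as "shared" in
+ * cobalt_fd_mmap records.
+ */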
+
+TRACE_EVENT(cobalt_fd_mmap,
+	TP_PROTO(struct task_struct *task,
+		 struct rtdm_fd *fd, int ufd, struct _rtdm_mmap_request *rma),
+	TP_ARGS(task, fd, ufd, rma),
+
+	TP_STRUCT__entry(
+		__array(char, comm, TASK_COMM_LEN)
+		__field(pid_t, pid)
+		__field(struct rtdm_device *, dev)
+		__field(int, ufd)
+		__field(size_t, length)
+		__field(off_t, offset)
+		__field(int, prot)
+		__field(int, flags)
+	),
+
+	TP_fast_assign(
+		memcpy(__entry->comm, task->comm, TASK_COMM_LEN);
+		__entry->pid = task_pid_nr(task);
+		__entry->dev = rtdm_fd_to_context(fd)->device;
+		__entry->ufd = ufd;
+		__entry->length = rma->length;
+		__entry->offset = rma->offset;
+		__entry->prot = rma->prot;
+		__entry->flags = rma->flags;
+	),
+
+	TP_printk("device=%p fd=%d area={ len:%zu, off:%Lu }"
+		  " prot=%#x(%s) flags=%#x(%s) pid=%d comm=%s",
+		  __entry->dev, __entry->ufd, __entry->length,
+		  (unsigned long long)__entry->offset,
+		  __entry->prot, cobalt_print_protbits(__entry->prot),
+		  __entry->flags, cobalt_print_mapbits(__entry->flags),
+		  __entry->pid, __entry->comm)
+);
+
+DEFINE_EVENT(fd_request_status, cobalt_fd_ioctl_status,
+	TP_PROTO(struct task_struct *task,
+		 struct rtdm_fd *fd, int ufd,
+		 int status),
+	TP_ARGS(task, fd, ufd, status)
+);
+
+DEFINE_EVENT(fd_request_status, cobalt_fd_read_status,
+	TP_PROTO(struct task_struct *task,
+		 struct rtdm_fd *fd, int ufd,
+		 int status),
+	TP_ARGS(task, fd, ufd, status)
+);
+
+DEFINE_EVENT(fd_request_status, cobalt_fd_write_status,
+	TP_PROTO(struct task_struct *task,
+		 struct rtdm_fd *fd, int ufd,
+		 int status),
+	TP_ARGS(task, fd, ufd, status)
+);
+
+DEFINE_EVENT(fd_request_status, cobalt_fd_recvmsg_status,
+	TP_PROTO(struct task_struct *task,
+		 struct rtdm_fd *fd, int ufd,
+		 int status),
+	TP_ARGS(task, fd, ufd, status)
+);
+
+DEFINE_EVENT(fd_request_status, cobalt_fd_recvmmsg_status,
+	TP_PROTO(struct task_struct *task,
+		 struct rtdm_fd *fd, int ufd,
+		 int status),
+	TP_ARGS(task, fd, ufd, status)
+);
+
+DEFINE_EVENT(fd_request_status, cobalt_fd_sendmsg_status,
+	TP_PROTO(struct task_struct *task,
+		 struct rtdm_fd *fd, int ufd,
+		 int status),
+	TP_ARGS(task, fd, ufd, status)
+);
+
+DEFINE_EVENT(fd_request_status, cobalt_fd_sendmmsg_status,
+	TP_PROTO(struct task_struct *task,
+		 struct rtdm_fd *fd, int ufd,
+		 int status),
+	TP_ARGS(task, fd, ufd, status)
+);
+
+DEFINE_EVENT(fd_request_status, cobalt_fd_mmap_status,
+	TP_PROTO(struct task_struct *task,
+		 struct rtdm_fd *fd, int ufd,
+		 int status),
+	TP_ARGS(task, fd, ufd, status)
+);
+
+DEFINE_EVENT(task_op, cobalt_driver_task_join,
+	TP_PROTO(struct xnthread *task),
+	TP_ARGS(task)
+);
+
+TRACE_EVENT(cobalt_driver_event_init,
+	TP_PROTO(struct rtdm_event *ev, unsigned long pending),
+	TP_ARGS(ev, pending),
+
+	TP_STRUCT__entry(
+		__field(struct rtdm_event *, ev)
+		__field(unsigned long,	pending)
+	),
+
+	TP_fast_assign(
+		__entry->ev = ev;
+		__entry->pending = pending;
+	),
+
+	TP_printk("event=%p pending=%#lx",
+		  __entry->ev, __entry->pending)
+);
+
+TRACE_EVENT(cobalt_driver_event_wait,
+	TP_PROTO(struct rtdm_event *ev, struct xnthread *task),
+	TP_ARGS(ev, task),
+
+	TP_STRUCT__entry(
+		__field(struct xnthread *, task)
+		__string(task_name, task->name)
+		__field(struct rtdm_event *, ev)
+	),
+
+	TP_fast_assign(
+		__entry->task = task;
+		__assign_str(task_name, task->name);
+		__entry->ev = ev;
+	),
+
+	TP_printk("event=%p task=%p(%s)",
+		  __entry->ev, __entry->task, __get_str(task_name))
+);
+
+DEFINE_EVENT(event_op, cobalt_driver_event_signal,
+	TP_PROTO(struct rtdm_event *ev),
+	TP_ARGS(ev)
+);
+
+DEFINE_EVENT(event_op, cobalt_driver_event_clear,
+	TP_PROTO(struct rtdm_event *ev),
+	TP_ARGS(ev)
+);
+
+DEFINE_EVENT(event_op, cobalt_driver_event_pulse,
+	TP_PROTO(struct rtdm_event *ev),
+	TP_ARGS(ev)
+);
+
+DEFINE_EVENT(event_op, cobalt_driver_event_destroy,
+	TP_PROTO(struct rtdm_event *ev),
+	TP_ARGS(ev)
+);
+
+TRACE_EVENT(cobalt_driver_sem_init,
+	TP_PROTO(struct rtdm_sem *sem, unsigned long value),
+	TP_ARGS(sem, value),
+
+	TP_STRUCT__entry(
+		__field(struct rtdm_sem *, sem)
+		__field(unsigned long, value)
+	),
+
+	TP_fast_assign(
+		__entry->sem = sem;
+		__entry->value = value;
+	),
+
+	TP_printk("sem=%p value=%lu",
+		  __entry->sem, __entry->value)
+);
+
+TRACE_EVENT(cobalt_driver_sem_wait,
+	TP_PROTO(struct rtdm_sem *sem, struct xnthread *task),
+	TP_ARGS(sem, task),
+
+	TP_STRUCT__entry(
+		__field(struct xnthread *, task)
+		__string(task_name, task->name)
+		__field(struct rtdm_sem *, sem)
+	),
+
+	TP_fast_assign(
+		__entry->task = task;
+		__assign_str(task_name, task->name);
+		__entry->sem = sem;
+	),
+
+	TP_printk("sem=%p task=%p(%s)",
+		  __entry->sem, __entry->task, __get_str(task_name))
+);
+
+DEFINE_EVENT(sem_op, cobalt_driver_sem_up,
+	TP_PROTO(struct rtdm_sem *sem),
+	TP_ARGS(sem)
+);
+
+DEFINE_EVENT(sem_op, cobalt_driver_sem_destroy,
+	TP_PROTO(struct rtdm_sem *sem),
+	TP_ARGS(sem)
+);
+
+DEFINE_EVENT(mutex_op, cobalt_driver_mutex_init,
+	TP_PROTO(struct rtdm_mutex *mutex),
+	TP_ARGS(mutex)
+);
+
+DEFINE_EVENT(mutex_op, cobalt_driver_mutex_release,
+	TP_PROTO(struct rtdm_mutex *mutex),
+	TP_ARGS(mutex)
+);
+
+DEFINE_EVENT(mutex_op, cobalt_driver_mutex_destroy,
+	TP_PROTO(struct rtdm_mutex *mutex),
+	TP_ARGS(mutex)
+);
+
+TRACE_EVENT(cobalt_driver_mutex_wait,
+	TP_PROTO(struct rtdm_mutex *mutex, struct xnthread *task),
+	TP_ARGS(mutex, task),
+
+	TP_STRUCT__entry(
+		__field(struct xnthread *, task)
+		__string(task_name, task->name)
+		__field(struct rtdm_mutex *, mutex)
+	),
+
+	TP_fast_assign(
+		__entry->task = task;
+		__assign_str(task_name, task->name);
+		__entry->mutex = mutex;
+	),
+
+	TP_printk("mutex=%p task=%p(%s)",
+		  __entry->mutex, __entry->task, __get_str(task_name))
+);
+
+#endif /* _TRACE_COBALT_RTDM_H */
+
+/* This part must be outside protection */
+#undef TRACE_INCLUDE_PATH
+#undef TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_FILE cobalt-rtdm
+#include <trace/define_trace.h>
+++ linux-patched/include/trace/events/cobalt-core.h	2022-03-21 12:58:28.896893769 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/kernel/xenomai/pipeline/init.c	1970-01-01 01:00:00.000000000 +0100
+/*
+ * Copyright (C) 2014 Jan Kiszka <jan.kiszka@siemens.com>.
+ * Copyright (C) 2014 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * Xenomai is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published
+ * by the Free Software Foundation; either version 2 of the License,
+ * or (at your option) any later version.
+ *
+ * Xenomai is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Xenomai; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA
+ * 02111-1307, USA.
+ */
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM cobalt_core
+
+#if !defined(_TRACE_COBALT_CORE_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_COBALT_CORE_H
+
+#include <linux/tracepoint.h>
+#include <linux/math64.h>
+#include <cobalt/kernel/timer.h>
+#include <cobalt/kernel/registry.h>
+#include <cobalt/uapi/kernel/types.h>
+
+struct xnsched;
+struct xnthread;
+struct xnsynch;
+struct xnsched_class;
+struct xnsched_quota_group;
+struct xnthread_init_attr;
+
+DECLARE_EVENT_CLASS(thread_event,
+	TP_PROTO(struct xnthread *thread),
+	TP_ARGS(thread),
+
+	TP_STRUCT__entry(
+		__field(pid_t, pid)
+		__field(unsigned long, state)
+		__field(unsigned long, info)
+	),
+
+	TP_fast_assign(
+		__entry->state = thread->state;
+		__entry->info = thread->info;
+		__entry->pid = xnthread_host_pid(thread);
+	),
+
+	TP_printk("pid=%d state=0x%lx info=0x%lx",
+		  __entry->pid, __entry->state, __entry->info)
+);
+
+DECLARE_EVENT_CLASS(curr_thread_event,
+	TP_PROTO(struct xnthread *thread),
+	TP_ARGS(thread),
+
+	TP_STRUCT__entry(
+		__field(struct xnthread *, thread)
+		__field(unsigned long, state)
+		__field(unsigned long, info)
+	),
+
+	TP_fast_assign(
+		__entry->state = thread->state;
+		__entry->info = thread->info;
+	),
+
+	TP_printk("state=0x%lx info=0x%lx",
+		  __entry->state, __entry->info)
+);
+
+DECLARE_EVENT_CLASS(synch_wait_event,
+	TP_PROTO(struct xnsynch *synch),
+	TP_ARGS(synch),
+
+	TP_STRUCT__entry(
+		__field(struct xnsynch *, synch)
+	),
+
+	TP_fast_assign(
+		__entry->synch = synch;
+	),
+
+	TP_printk("synch=%p", __entry->synch)
+);
+
+DECLARE_EVENT_CLASS(synch_post_event,
+	TP_PROTO(struct xnsynch *synch),
+	TP_ARGS(synch),
+
+	TP_STRUCT__entry(
+		__field(struct xnsynch *, synch)
+	),
+
+	TP_fast_assign(
+		__entry->synch = synch;
+	),
+
+	TP_printk("synch=%p", __entry->synch)
+);
+
+DECLARE_EVENT_CLASS(irq_event,
+	TP_PROTO(unsigned int irq),
+	TP_ARGS(irq),
+
+	TP_STRUCT__entry(
+		__field(unsigned int, irq)
+	),
+
+	TP_fast_assign(
+		__entry->irq = irq;
+	),
+
+	TP_printk("irq=%u", __entry->irq)
+);
+
+DECLARE_EVENT_CLASS(clock_event,
+	TP_PROTO(unsigned int irq),
+	TP_ARGS(irq),
+
+	TP_STRUCT__entry(
+		__field(unsigned int, irq)
+	),
+
+	TP_fast_assign(
+		__entry->irq = irq;
+	),
+
+	TP_printk("clock_irq=%u", __entry->irq)
+);
+
+DECLARE_EVENT_CLASS(timer_event,
+	TP_PROTO(struct xntimer *timer),
+	TP_ARGS(timer),
+
+	TP_STRUCT__entry(
+		__field(struct xntimer *, timer)
+	),
+
+	TP_fast_assign(
+		__entry->timer = timer;
+	),
+
+	TP_printk("timer=%p", __entry->timer)
+);
+
+DECLARE_EVENT_CLASS(registry_event,
+	TP_PROTO(const char *key, void *addr),
+	TP_ARGS(key, addr),
+
+	TP_STRUCT__entry(
+		__string(key, key ?: "(anon)")
+		__field(void *, addr)
+	),
+
+	TP_fast_assign(
+		__assign_str(key, key ?: "(anon)");
+		__entry->addr = addr;
+	),
+
+	TP_printk("key=%s, addr=%p", __get_str(key), __entry->addr)
+);
+
+TRACE_EVENT(cobalt_schedule,
+	TP_PROTO(struct xnsched *sched),
+	TP_ARGS(sched),
+
+	TP_STRUCT__entry(
+		__field(unsigned long, status)
+	),
+
+	TP_fast_assign(
+		__entry->status = sched->status;
+	),
+
+	TP_printk("status=0x%lx", __entry->status)
+);
+
+TRACE_EVENT(cobalt_schedule_remote,
+	TP_PROTO(struct xnsched *sched),
+	TP_ARGS(sched),
+
+	TP_STRUCT__entry(
+		__field(unsigned long, status)
+	),
+
+	TP_fast_assign(
+		__entry->status = sched->status;
+	),
+
+	TP_printk("status=0x%lx", __entry->status)
+);
+
+TRACE_EVENT(cobalt_switch_context,
+	TP_PROTO(struct xnthread *prev, struct xnthread *next),
+	TP_ARGS(prev, next),
+
+	TP_STRUCT__entry(
+		__field(struct xnthread *, prev)
+		__string(prev_name, prev->name)
+		__field(pid_t, prev_pid)
+		__field(int, prev_prio)
+		__field(unsigned long, prev_state)
+		__field(struct xnthread *, next)
+		__string(next_name, next->name)
+		__field(pid_t, next_pid)
+		__field(int, next_prio)
+	),
+
+	TP_fast_assign(
+		__entry->prev = prev;
+		__assign_str(prev_name, prev->name);
+		__entry->prev_pid = xnthread_host_pid(prev);
+		__entry->prev_prio = xnthread_current_priority(prev);
+		__entry->prev_state = prev->state;
+		__entry->next = next;
+		__assign_str(next_name, next->name);
+		__entry->next_pid = xnthread_host_pid(next);
+		__entry->next_prio = xnthread_current_priority(next);
+	),
+
+	TP_printk("prev_name=%s prev_pid=%d prev_prio=%d prev_state=0x%lx ==> next_name=%s next_pid=%d next_prio=%d",
+		  __get_str(prev_name), __entry->prev_pid,
+		  __entry->prev_prio, __entry->prev_state,
+		  __get_str(next_name), __entry->next_pid, __entry->next_prio)
+);
+
+#ifdef CONFIG_XENO_OPT_SCHED_QUOTA
+
+TRACE_EVENT(cobalt_schedquota_refill,
+	TP_PROTO(int dummy),
+	TP_ARGS(dummy),
+
+	TP_STRUCT__entry(
+		__field(int, dummy)
+	),
+
+	TP_fast_assign(
+		(void)dummy;
+	),
+
+	TP_printk("%s", "")
+);
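+/*
+ * The dummy argument above exists only because the tracepoint macros
+ * require at least one call parameter; it is discarded, and the event
+ * merely timestamps each refill of the quota groups' budgets.
+ */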
+
+DECLARE_EVENT_CLASS(schedquota_group_event,
+	TP_PROTO(struct xnsched_quota_group *tg),
+	TP_ARGS(tg),
+
+	TP_STRUCT__entry(
+		__field(int, tgid)
+	),
+
+	TP_fast_assign(
+		__entry->tgid = tg->tgid;
+	),
+
+	TP_printk("tgid=%d",
+		  __entry->tgid)
+);
+
+DEFINE_EVENT(schedquota_group_event, cobalt_schedquota_create_group,
+	TP_PROTO(struct xnsched_quota_group *tg),
+	TP_ARGS(tg)
+);
+
+DEFINE_EVENT(schedquota_group_event, cobalt_schedquota_destroy_group,
+	TP_PROTO(struct xnsched_quota_group *tg),
+	TP_ARGS(tg)
+);
+
+TRACE_EVENT(cobalt_schedquota_set_limit,
+	TP_PROTO(struct xnsched_quota_group *tg,
+		 int percent,
+		 int peak_percent),
+	TP_ARGS(tg, percent, peak_percent),
+
+	TP_STRUCT__entry(
+		__field(int, tgid)
+		__field(int, percent)
+		__field(int, peak_percent)
+	),
+
+	TP_fast_assign(
+		__entry->tgid = tg->tgid;
+		__entry->percent = percent;
+		__entry->peak_percent = peak_percent;
+	),
+
+	TP_printk("tgid=%d percent=%d peak_percent=%d",
+		  __entry->tgid, __entry->percent, __entry->peak_percent)
+);
+
+DECLARE_EVENT_CLASS(schedquota_thread_event,
+	TP_PROTO(struct xnsched_quota_group *tg,
+		 struct xnthread *thread),
+	TP_ARGS(tg, thread),
+
+	TP_STRUCT__entry(
+		__field(int, tgid)
+		__field(struct xnthread *, thread)
+		__field(pid_t, pid)
+	),
+
+	TP_fast_assign(
+		__entry->tgid = tg->tgid;
+		__entry->thread = thread;
+		__entry->pid = xnthread_host_pid(thread);
+	),
+
+	TP_printk("tgid=%d thread=%p pid=%d",
+		  __entry->tgid, __entry->thread, __entry->pid)
+);
+
+DEFINE_EVENT(schedquota_thread_event, cobalt_schedquota_add_thread,
+	TP_PROTO(struct xnsched_quota_group *tg,
+		 struct xnthread *thread),
+	TP_ARGS(tg, thread)
+);
+
+DEFINE_EVENT(schedquota_thread_event, cobalt_schedquota_remove_thread,
+	TP_PROTO(struct xnsched_quota_group *tg,
+		 struct xnthread *thread),
+	TP_ARGS(tg, thread)
+);
+
+#endif /* CONFIG_XENO_OPT_SCHED_QUOTA */
+
+TRACE_EVENT(cobalt_thread_init,
+	TP_PROTO(struct xnthread *thread,
+		 const struct xnthread_init_attr *attr,
+		 struct xnsched_class *sched_class),
+	TP_ARGS(thread, attr, sched_class),
+
+	TP_STRUCT__entry(
+		__field(struct xnthread *, thread)
+		__string(thread_name, thread->name)
+		__string(class_name, sched_class->name)
+		__field(unsigned long, flags)
+		__field(int, cprio)
+	),
+
+	TP_fast_assign(
+		__entry->thread = thread;
+		__assign_str(thread_name, thread->name);
+		__entry->flags = attr->flags;
+		__assign_str(class_name, sched_class->name);
+		__entry->cprio = thread->cprio;
+	),
+
+	TP_printk("thread=%p name=%s flags=0x%lx class=%s prio=%d",
+		   __entry->thread, __get_str(thread_name), __entry->flags,
+		   __get_str(class_name), __entry->cprio)
+);
+
+TRACE_EVENT(cobalt_thread_suspend,
+	TP_PROTO(struct xnthread *thread, unsigned long mask, xnticks_t timeout,
+		 xntmode_t timeout_mode, struct xnsynch *wchan),
+	TP_ARGS(thread, mask, timeout, timeout_mode, wchan),
+
+	TP_STRUCT__entry(
+		__field(pid_t, pid)
+		__field(unsigned long, mask)
+		__field(xnticks_t, timeout)
+		__field(xntmode_t, timeout_mode)
+		__field(struct xnsynch *, wchan)
+	),
+
+	TP_fast_assign(
+		__entry->pid = xnthread_host_pid(thread);
+		__entry->mask = mask;
+		__entry->timeout = timeout;
+		__entry->timeout_mode = timeout_mode;
+		__entry->wchan = wchan;
+	),
+
+	TP_printk("pid=%d mask=0x%lx timeout=%Lu timeout_mode=%d wchan=%p",
+		  __entry->pid, __entry->mask,
+		  __entry->timeout, __entry->timeout_mode, __entry->wchan)
+);
+
+TRACE_EVENT(cobalt_thread_resume,
+	TP_PROTO(struct xnthread *thread, unsigned long mask),
+	TP_ARGS(thread, mask),
+
+	TP_STRUCT__entry(
+		__string(name, thread->name)
+		__field(pid_t, pid)
+		__field(unsigned long, mask)
+	),
+
+	TP_fast_assign(
+		__assign_str(name, thread->name);
+		__entry->pid = xnthread_host_pid(thread);
+		__entry->mask = mask;
+	),
+
+	TP_printk("name=%s pid=%d mask=0x%lx",
+		  __get_str(name), __entry->pid, __entry->mask)
+);
+
+TRACE_EVENT(cobalt_thread_fault,
+	TP_PROTO(unsigned long ip, unsigned int type),
+	TP_ARGS(ip, type),
+
+	TP_STRUCT__entry(
+		__field(unsigned long, ip)
+		__field(unsigned int, type)
+	),
+
+	TP_fast_assign(
+		__entry->ip = ip;
+		__entry->type = type;
+	),
+
+	TP_printk("ip=%#lx type=%#x",
+		  __entry->ip, __entry->type)
+);
+
+TRACE_EVENT(cobalt_thread_set_current_prio,
+	TP_PROTO(struct xnthread *thread),
+	TP_ARGS(thread),
+
+	TP_STRUCT__entry(
+		__field(struct xnthread *, thread)
+		__field(pid_t, pid)
+		__field(int, cprio)
+	),
+
+	TP_fast_assign(
+		__entry->thread = thread;
+		__entry->pid = xnthread_host_pid(thread);
+		__entry->cprio = xnthread_current_priority(thread);
+	),
+
+	TP_printk("thread=%p pid=%d prio=%d",
+		  __entry->thread, __entry->pid, __entry->cprio)
+);
+
+DEFINE_EVENT(thread_event, cobalt_thread_start,
+	TP_PROTO(struct xnthread *thread),
+	TP_ARGS(thread)
+);
+
+DEFINE_EVENT(thread_event, cobalt_thread_cancel,
+	TP_PROTO(struct xnthread *thread),
+	TP_ARGS(thread)
+);
+
+DEFINE_EVENT(thread_event, cobalt_thread_join,
+	TP_PROTO(struct xnthread *thread),
+	TP_ARGS(thread)
+);
+
+DEFINE_EVENT(thread_event, cobalt_thread_unblock,
+	TP_PROTO(struct xnthread *thread),
+	TP_ARGS(thread)
+);
+
+DEFINE_EVENT(curr_thread_event, cobalt_thread_wait_period,
+	TP_PROTO(struct xnthread *thread),
+	TP_ARGS(thread)
+);
+
+DEFINE_EVENT(curr_thread_event, cobalt_thread_missed_period,
+	TP_PROTO(struct xnthread *thread),
+	TP_ARGS(thread)
+);
+
+DEFINE_EVENT(curr_thread_event, cobalt_thread_set_mode,
+	TP_PROTO(struct xnthread *thread),
+	TP_ARGS(thread)
+);
+
+TRACE_EVENT(cobalt_thread_migrate,
+	TP_PROTO(unsigned int cpu),
+	TP_ARGS(cpu),
+
+	TP_STRUCT__entry(
+		__field(unsigned int, cpu)
+	),
+
+	TP_fast_assign(
+		__entry->cpu = cpu;
+	),
+
+	TP_printk("cpu=%u", __entry->cpu)
+);
+
+TRACE_EVENT(cobalt_thread_migrate_passive,
+	TP_PROTO(struct xnthread *thread, unsigned int cpu),
+	TP_ARGS(thread, cpu),
+
+	TP_STRUCT__entry(
+		__field(struct xnthread *, thread)
+		__field(pid_t, pid)
+		__field(unsigned int, cpu)
+	),
+
+	TP_fast_assign(
+		__entry->thread = thread;
+		__entry->pid = xnthread_host_pid(thread);
+		__entry->cpu = cpu;
+	),
+
+	TP_printk("thread=%p pid=%d cpu=%u",
+		  __entry->thread, __entry->pid, __entry->cpu)
+);
+
+DEFINE_EVENT(curr_thread_event, cobalt_shadow_gohard,
+	TP_PROTO(struct xnthread *thread),
+	TP_ARGS(thread)
+);
+
+DEFINE_EVENT(curr_thread_event, cobalt_watchdog_signal,
+	TP_PROTO(struct xnthread *thread),
+	TP_ARGS(thread)
+);
+
+DEFINE_EVENT(curr_thread_event, cobalt_shadow_hardened,
+	TP_PROTO(struct xnthread *thread),
+	TP_ARGS(thread)
+);
+
+#define cobalt_print_relax_reason(reason)				\
+	__print_symbolic(reason,					\
+			 { SIGDEBUG_UNDEFINED,		"undefined" },	\
+			 { SIGDEBUG_MIGRATE_SIGNAL,	"signal" },	\
+			 { SIGDEBUG_MIGRATE_SYSCALL,	"syscall" },	\
+			 { SIGDEBUG_MIGRATE_FAULT,	"fault" })
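+/*
+ * __print_symbolic() maps the SIGDEBUG_* cause to a label in the trace
+ * output, so a relax forced by a regular Linux syscall is reported as
+ * "reason=syscall" by cobalt_shadow_gorelax below.
+ */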
+
+TRACE_EVENT(cobalt_shadow_gorelax,
+	TP_PROTO(int reason),
+	TP_ARGS(reason),
+
+	TP_STRUCT__entry(
+		__field(int, reason)
+	),
+
+	TP_fast_assign(
+		__entry->reason = reason;
+	),
+
+	TP_printk("reason=%s", cobalt_print_relax_reason(__entry->reason))
+);
+
+DEFINE_EVENT(curr_thread_event, cobalt_shadow_relaxed,
+	TP_PROTO(struct xnthread *thread),
+	TP_ARGS(thread)
+);
+
+DEFINE_EVENT(curr_thread_event, cobalt_shadow_entry,
+	TP_PROTO(struct xnthread *thread),
+	TP_ARGS(thread)
+);
+
+TRACE_EVENT(cobalt_shadow_map,
+	TP_PROTO(struct xnthread *thread),
+	TP_ARGS(thread),
+
+	TP_STRUCT__entry(
+		__field(struct xnthread *, thread)
+		__field(pid_t, pid)
+		__field(int, prio)
+	),
+
+	TP_fast_assign(
+		__entry->thread = thread;
+		__entry->pid = xnthread_host_pid(thread);
+		__entry->prio = xnthread_base_priority(thread);
+	),
+
+	TP_printk("thread=%p pid=%d prio=%d",
+		  __entry->thread, __entry->pid, __entry->prio)
+);
+
+DEFINE_EVENT(curr_thread_event, cobalt_shadow_unmap,
+	TP_PROTO(struct xnthread *thread),
+	TP_ARGS(thread)
+);
+
+TRACE_EVENT(cobalt_lostage_request,
+	TP_PROTO(const char *type, struct task_struct *task),
+	TP_ARGS(type, task),
+
+	TP_STRUCT__entry(
+		__field(pid_t, pid)
+		__array(char, comm, TASK_COMM_LEN)
+		__field(const char *, type)
+	),
+
+	TP_fast_assign(
+		__entry->type = type;
+		__entry->pid = task_pid_nr(task);
+		memcpy(__entry->comm, task->comm, TASK_COMM_LEN);
+	),
+
+	TP_printk("request=%s pid=%d comm=%s",
+		  __entry->type, __entry->pid, __entry->comm)
+);
+
+TRACE_EVENT(cobalt_lostage_wakeup,
+	TP_PROTO(struct task_struct *task),
+	TP_ARGS(task),
+
+	TP_STRUCT__entry(
+		__field(pid_t, pid)
+		__array(char, comm, TASK_COMM_LEN)
+	),
+
+	TP_fast_assign(
+		__entry->pid = task_pid_nr(task);
+		memcpy(__entry->comm, task->comm, TASK_COMM_LEN);
+	),
+
+	TP_printk("pid=%d comm=%s",
+		  __entry->pid, __entry->comm)
+);
+
+TRACE_EVENT(cobalt_lostage_signal,
+	TP_PROTO(struct task_struct *task, int sig),
+	TP_ARGS(task, sig),
+
+	TP_STRUCT__entry(
+		__field(pid_t, pid)
+		__array(char, comm, TASK_COMM_LEN)
+		__field(int, sig)
+	),
+
+	TP_fast_assign(
+		__entry->pid = task_pid_nr(task);
+		__entry->sig = sig;
+		memcpy(__entry->comm, task->comm, TASK_COMM_LEN);
+	),
+
+	TP_printk("pid=%d comm=%s sig=%d",
+		  __entry->pid, __entry->comm, __entry->sig)
+);
+
+DEFINE_EVENT(irq_event, cobalt_irq_entry,
+	TP_PROTO(unsigned int irq),
+	TP_ARGS(irq)
+);
+
+DEFINE_EVENT(irq_event, cobalt_irq_exit,
+	TP_PROTO(unsigned int irq),
+	TP_ARGS(irq)
+);
+
+DEFINE_EVENT(irq_event, cobalt_irq_attach,
+	TP_PROTO(unsigned int irq),
+	TP_ARGS(irq)
+);
+
+DEFINE_EVENT(irq_event, cobalt_irq_detach,
+	TP_PROTO(unsigned int irq),
+	TP_ARGS(irq)
+);
+
+DEFINE_EVENT(irq_event, cobalt_irq_enable,
+	TP_PROTO(unsigned int irq),
+	TP_ARGS(irq)
+);
+
+DEFINE_EVENT(irq_event, cobalt_irq_disable,
+	TP_PROTO(unsigned int irq),
+	TP_ARGS(irq)
+);
+
+DEFINE_EVENT(clock_event, cobalt_clock_entry,
+	TP_PROTO(unsigned int irq),
+	TP_ARGS(irq)
+);
+
+DEFINE_EVENT(clock_event, cobalt_clock_exit,
+	TP_PROTO(unsigned int irq),
+	TP_ARGS(irq)
+);
+
+DEFINE_EVENT(timer_event, cobalt_timer_stop,
+	TP_PROTO(struct xntimer *timer),
+	TP_ARGS(timer)
+);
+
+DEFINE_EVENT(timer_event, cobalt_timer_expire,
+	TP_PROTO(struct xntimer *timer),
+	TP_ARGS(timer)
+);
+
+#define cobalt_print_timer_mode(mode)			\
+	__print_symbolic(mode,				\
+			 { XN_RELATIVE, "rel" },	\
+			 { XN_ABSOLUTE, "abs" },	\
+			 { XN_REALTIME, "rt" })
+
+TRACE_EVENT(cobalt_timer_start,
+	TP_PROTO(struct xntimer *timer, xnticks_t value, xnticks_t interval,
+		 xntmode_t mode),
+	TP_ARGS(timer, value, interval, mode),
+
+	TP_STRUCT__entry(
+		__field(struct xntimer *, timer)
+#ifdef CONFIG_XENO_OPT_STATS
+		__string(name, timer->name)
+#endif
+		__field(xnticks_t, value)
+		__field(xnticks_t, interval)
+		__field(xntmode_t, mode)
+	),
+
+	TP_fast_assign(
+		__entry->timer = timer;
+#ifdef CONFIG_XENO_OPT_STATS
+		__assign_str(name, timer->name);
+#endif
+		__entry->value = value;
+		__entry->interval = interval;
+		__entry->mode = mode;
+	),
+
+	TP_printk("timer=%p(%s) value=%Lu interval=%Lu mode=%s",
+		  __entry->timer,
+#ifdef CONFIG_XENO_OPT_STATS
+		  __get_str(name),
+#else
+		  "(anon)",
+#endif
+		  __entry->value, __entry->interval,
+		  cobalt_print_timer_mode(__entry->mode))
+);
+
+#ifdef CONFIG_SMP
+
+TRACE_EVENT(cobalt_timer_migrate,
+	TP_PROTO(struct xntimer *timer, unsigned int cpu),
+	TP_ARGS(timer, cpu),
+
+	TP_STRUCT__entry(
+		__field(struct xntimer *, timer)
+		__field(unsigned int, cpu)
+	),
+
+	TP_fast_assign(
+		__entry->timer = timer;
+		__entry->cpu = cpu;
+	),
+
+	TP_printk("timer=%p cpu=%u",
+		  __entry->timer, __entry->cpu)
+);
+
+#endif /* CONFIG_SMP */
+
+DEFINE_EVENT(synch_wait_event, cobalt_synch_sleepon,
+	TP_PROTO(struct xnsynch *synch),
+	TP_ARGS(synch)
+);
+
+DEFINE_EVENT(synch_wait_event, cobalt_synch_try_acquire,
+	TP_PROTO(struct xnsynch *synch),
+	TP_ARGS(synch)
+);
+
+DEFINE_EVENT(synch_wait_event, cobalt_synch_acquire,
+	TP_PROTO(struct xnsynch *synch),
+	TP_ARGS(synch)
+);
+
+DEFINE_EVENT(synch_post_event, cobalt_synch_release,
+	TP_PROTO(struct xnsynch *synch),
+	TP_ARGS(synch)
+);
+
+DEFINE_EVENT(synch_post_event, cobalt_synch_wakeup,
+	TP_PROTO(struct xnsynch *synch),
+	TP_ARGS(synch)
+);
+
+DEFINE_EVENT(synch_post_event, cobalt_synch_wakeup_many,
+	TP_PROTO(struct xnsynch *synch),
+	TP_ARGS(synch)
+);
+
+DEFINE_EVENT(synch_post_event, cobalt_synch_flush,
+	TP_PROTO(struct xnsynch *synch),
+	TP_ARGS(synch)
+);
+
+DEFINE_EVENT(synch_post_event, cobalt_synch_forget,
+	TP_PROTO(struct xnsynch *synch),
+	TP_ARGS(synch)
+);
+
+DEFINE_EVENT(registry_event, cobalt_registry_enter,
+	TP_PROTO(const char *key, void *addr),
+	TP_ARGS(key, addr)
+);
+
+DEFINE_EVENT(registry_event, cobalt_registry_remove,
+	TP_PROTO(const char *key, void *addr),
+	TP_ARGS(key, addr)
+);
+
+DEFINE_EVENT(registry_event, cobalt_registry_unlink,
+	TP_PROTO(const char *key, void *addr),
+	TP_ARGS(key, addr)
+);
+
+TRACE_EVENT(cobalt_tick_shot,
+	TP_PROTO(s64 delta),
+	TP_ARGS(delta),
+
+	TP_STRUCT__entry(
+		__field(u64, secs)
+		__field(u32, nsecs)
+		__field(s64, delta)
+	),
+
+	TP_fast_assign(
+		__entry->delta = delta;
+		__entry->secs = div_u64_rem(trace_clock_local() + delta,
+					    NSEC_PER_SEC, &__entry->nsecs);
+	),
+
+	TP_printk("next tick at %Lu.%06u (delay: %Ld us)",
+		  (unsigned long long)__entry->secs,
+		  __entry->nsecs / 1000, div_s64(__entry->delta, 1000))
+);
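+/*
+ * Worked example for the format above: with trace_clock_local() at
+ * 5.000000000 s and delta=150000 ns, secs=5 and nsecs=150000, which
+ * prints as "next tick at 5.000150 (delay: 150 us)".
+ */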
+
+TRACE_EVENT(cobalt_trace,
+	TP_PROTO(const char *msg),
+	TP_ARGS(msg),
+	TP_STRUCT__entry(
+		__string(msg, msg)
+	),
+	TP_fast_assign(
+		__assign_str(msg, msg);
+	),
+	TP_printk("%s", __get_str(msg))
+);
+
+TRACE_EVENT(cobalt_trace_longval,
+	TP_PROTO(int id, u64 val),
+	TP_ARGS(id, val),
+	TP_STRUCT__entry(
+		__field(int, id)
+		__field(u64, val)
+	),
+	TP_fast_assign(
+		__entry->id = id;
+		__entry->val = val;
+	),
+	TP_printk("id=%#x, v=%llu", __entry->id, __entry->val)
+);
+
+TRACE_EVENT(cobalt_trace_pid,
+	TP_PROTO(pid_t pid, int prio),
+	TP_ARGS(pid, prio),
+	TP_STRUCT__entry(
+		__field(pid_t, pid)
+		__field(int, prio)
+	),
+	TP_fast_assign(
+		__entry->pid = pid;
+		__entry->prio = prio;
+	),
+	TP_printk("pid=%d, prio=%d", __entry->pid, __entry->prio)
+);
+
+TRACE_EVENT(cobalt_latpeak,
+	TP_PROTO(int latmax_ns),
+	TP_ARGS(latmax_ns),
+	TP_STRUCT__entry(
+		 __field(int, latmax_ns)
+	),
+	TP_fast_assign(
+		__entry->latmax_ns = latmax_ns;
+	),
+	TP_printk("** latency peak: %d.%.3d us **",
+		  __entry->latmax_ns / 1000,
+		  __entry->latmax_ns % 1000)
+);
+
+/* Basically cobalt_trace() + trigger point */
+TRACE_EVENT(cobalt_trigger,
+	TP_PROTO(const char *issuer),
+	TP_ARGS(issuer),
+	TP_STRUCT__entry(
+		__string(issuer, issuer)
+	),
+	TP_fast_assign(
+		__assign_str(issuer, issuer);
+	),
+	TP_printk("%s", __get_str(issuer))
+);
+
+#endif /* _TRACE_COBALT_CORE_H */
+
+/* This part must be outside protection */
+#undef TRACE_INCLUDE_PATH
+#undef TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_FILE cobalt-core
+#include <trace/define_trace.h>
+++ linux-patched/kernel/xenomai/pipeline/init.c	2022-03-21 12:58:29.144891350 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/kernel/xenomai/pipeline/sched.c	1970-01-01 01:00:00.000000000 +0100
+/*
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright (C) 2020 Philippe Gerum  <rpm@xenomai.org>
+ */
+
+#include <linux/init.h>
+#include <pipeline/machine.h>
+#include <cobalt/kernel/clock.h>
+#include <cobalt/kernel/assert.h>
+
+int __init pipeline_init(void)
+{
+	int ret;
+
+	if (cobalt_machine.init) {
+		ret = cobalt_machine.init();
+		if (ret)
+			return ret;
+	}
+
+	/* Enable the Xenomai out-of-band stage */
+	enable_oob_stage("Xenomai");
+
+	ret = xnclock_init();
+	if (ret)
+		goto fail_clock;
+
+	return 0;
+
+fail_clock:
+	if (cobalt_machine.cleanup)
+		cobalt_machine.cleanup();
+
+	return ret;
+}
+
+int __init pipeline_late_init(void)
+{
+	if (cobalt_machine.late_init)
+		return cobalt_machine.late_init();
+
+	return 0;
+}
+
+__init void pipeline_cleanup(void)
+{
+	/* Disable the Xenomai stage */
+	disable_oob_stage();
+
+	xnclock_cleanup();
+}
+++ linux-patched/kernel/xenomai/pipeline/sched.c	2022-03-21 12:58:29.141891380 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/kernel/xenomai/pipeline/intr.c	1970-01-01 01:00:00.000000000 +0100
+/*
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright (C) 2001-2020 Philippe Gerum <rpm@xenomai.org>.
+ */
+
+#include <linux/cpuidle.h>
+#include <cobalt/kernel/thread.h>
+#include <cobalt/kernel/sched.h>
+#include <pipeline/sched.h>
+#include <trace/events/cobalt-core.h>
+
+/* in-band stage, hard_irqs_disabled() */
+bool irq_cpuidle_control(struct cpuidle_device *dev,
+			struct cpuidle_state *state)
+{
+	/*
+	 * Deny entering sleep state if this entails stopping the
+	 * timer (i.e. C3STOP misfeature).
+	 */
+	if (state && (state->flags & CPUIDLE_FLAG_TIMER_STOP))
+		return false;
+
+	return true;
+}
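+/*
+ * In effect, cpuidle is never allowed to enter an idle state flagged
+ * CPUIDLE_FLAG_TIMER_STOP, so the per-CPU tick hardware Cobalt relies
+ * on keeps running across idle.
+ */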
+
+bool pipeline_switch_to(struct xnthread *prev, struct xnthread *next,
+			bool leaving_inband)
+{
+	return dovetail_context_switch(&xnthread_archtcb(prev)->altsched,
+			&xnthread_archtcb(next)->altsched, leaving_inband);
+}
+
+void pipeline_init_shadow_tcb(struct xnthread *thread)
+{
+	/*
+	 * Initialize the alternate scheduling control block.
+	 */
+	dovetail_init_altsched(&xnthread_archtcb(thread)->altsched);
+
+	trace_cobalt_shadow_map(thread);
+}
+
+void pipeline_init_root_tcb(struct xnthread *thread)
+{
+	/*
+	 * Initialize the alternate scheduling control block.
+	 */
+	dovetail_init_altsched(&xnthread_archtcb(thread)->altsched);
+}
+
+int pipeline_leave_inband(void)
+{
+	return dovetail_leave_inband();
+}
+
+int pipeline_leave_oob_prepare(void)
+{
+	int suspmask = XNRELAX;
+	struct xnthread *curr = xnthread_current();
+
+	dovetail_leave_oob();
+	/*
+	 * If current is being debugged, record that it should migrate
+	 * back in case it resumes in userspace. If it resumes in
+	 * kernel space, i.e.  over a restarting syscall, the
+	 * associated hardening will clear XNCONTHI.
+	 */
+	if (xnthread_test_state(curr, XNSSTEP)) {
+		xnthread_set_info(curr, XNCONTHI);
+		dovetail_request_ucall(current);
+		suspmask |= XNDBGSTOP;
+	}
+	return suspmask;
+}
+
+void pipeline_leave_oob_finish(void)
+{
+	dovetail_resume_inband();
+}
+
+void pipeline_raise_mayday(struct task_struct *tsk)
+{
+	dovetail_send_mayday(tsk);
+}
+
+void pipeline_clear_mayday(void) /* May solely affect current. */
+{
+	clear_thread_flag(TIF_MAYDAY);
+}
+
+irqreturn_t pipeline_reschedule_ipi_handler(int irq, void *dev_id)
+{
+	trace_cobalt_schedule_remote(xnsched_current());
+
+	/* Will reschedule from irq_exit_pipeline(). */
+
+	return IRQ_HANDLED;
+}
+++ linux-patched/kernel/xenomai/pipeline/intr.c	2022-03-21 12:58:29.137891419 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/kernel/xenomai/pipeline/Makefile	1970-01-01 01:00:00.000000000 +0100
+/*
+ * SPDX-License-Identifier: GPL-2.0
+ */
+
+#include <linux/interrupt.h>
+#include <linux/irq_pipeline.h>
+#include <linux/tick.h>
+#include <cobalt/kernel/sched.h>
+#include <cobalt/kernel/lock.h>
+#include <cobalt/kernel/intr.h>
+
+void xnintr_host_tick(struct xnsched *sched) /* hard irqs off */
+{
+	sched->lflags &= ~XNHTICK;
+	tick_notify_proxy();
+}
+
+/*
+ * Low-level core clock irq handler. This one forwards ticks from the
+ * Xenomai platform timer to nkclock exclusively.
+ */
+void xnintr_core_clock_handler(void)
+{
+	struct xnsched *sched;
+
+	xnlock_get(&nklock);
+	xnclock_tick(&nkclock);
+	xnlock_put(&nklock);
+
+	/*
+	 * If the core clock interrupt preempted a real-time thread,
+	 * any transition to the root thread has already triggered a
+	 * host tick propagation from xnsched_run(), so at this point,
+	 * we only need to propagate the host tick in case the
+	 * interrupt preempted the root thread.
+	 */
+	sched = xnsched_current();
+	if ((sched->lflags & XNHTICK) &&
+	    xnthread_test_state(sched->curr, XNROOT))
+		xnintr_host_tick(sched);
+}
+
+static irqreturn_t xnintr_irq_handler(int irq, void *dev_id)
+{
+	struct xnintr *intr = dev_id;
+	int ret;
+
+	ret = intr->isr(intr);
+	XENO_WARN_ON_ONCE(USER, (ret & XN_IRQ_STATMASK) == 0);
+
+	if (ret & XN_IRQ_DISABLE)
+		disable_irq(irq);
+	else if (ret & XN_IRQ_PROPAGATE)
+		irq_post_inband(irq);
+
+	return ret & XN_IRQ_NONE ? IRQ_NONE : IRQ_HANDLED;
+}
+
+int xnintr_init(struct xnintr *intr, const char *name,
+		unsigned int irq, xnisr_t isr, xniack_t iack,
+		int flags)
+{
+	secondary_mode_only();
+
+	intr->irq = irq;
+	intr->isr = isr;
+	intr->iack = NULL;	/* unused */
+	intr->cookie = NULL;
+	intr->name = name ? : "<unknown>";
+	intr->flags = flags;
+	intr->status = 0;
+	intr->unhandled = 0;	/* unused */
+	raw_spin_lock_init(&intr->lock); /* unused */
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(xnintr_init);
+
+void xnintr_destroy(struct xnintr *intr)
+{
+	secondary_mode_only();
+	xnintr_detach(intr);
+}
+EXPORT_SYMBOL_GPL(xnintr_destroy);
+
+int xnintr_attach(struct xnintr *intr, void *cookie, const cpumask_t *cpumask)
+{
+	cpumask_t tmp_mask, *effective_mask;
+	int ret;
+
+	secondary_mode_only();
+
+	intr->cookie = cookie;
+
+	if (!cpumask) {
+		effective_mask = &xnsched_realtime_cpus;
+	} else {
+		effective_mask = &tmp_mask;
+		cpumask_and(effective_mask, &xnsched_realtime_cpus, cpumask);
+		if (cpumask_empty(effective_mask))
+			return -EINVAL;
+	}
+	ret = irq_set_affinity_hint(intr->irq, effective_mask);
+	if (ret)
+		return ret;
+
+	return request_irq(intr->irq, xnintr_irq_handler, IRQF_OOB,
+			intr->name, intr);
+}
+EXPORT_SYMBOL_GPL(xnintr_attach);
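+/*
+ * Driver-side usage sketch (illustrative only; the "mydrv_*" names are
+ * made up). Passing a NULL cpumask to xnintr_attach() keeps the IRQ on
+ * the default set of real-time CPUs:
+ *
+ *	static int mydrv_isr(struct xnintr *intr)
+ *	{
+ *		... acknowledge the device, wake up waiters ...
+ *		return XN_IRQ_HANDLED;
+ *	}
+ *
+ *	xnintr_init(&mydrv_intr, "mydrv", mydrv_irq, mydrv_isr, NULL, 0);
+ *	xnintr_attach(&mydrv_intr, NULL, NULL);
+ */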
+
+void xnintr_detach(struct xnintr *intr)
+{
+	secondary_mode_only();
+	irq_set_affinity_hint(intr->irq, NULL);
+	free_irq(intr->irq, intr);
+}
+EXPORT_SYMBOL_GPL(xnintr_detach);
+
+void xnintr_enable(struct xnintr *intr)
+{
+}
+EXPORT_SYMBOL_GPL(xnintr_enable);
+
+void xnintr_disable(struct xnintr *intr)
+{
+}
+EXPORT_SYMBOL_GPL(xnintr_disable);
+
+int xnintr_affinity(struct xnintr *intr, const cpumask_t *cpumask)
+{
+	cpumask_t effective_mask;
+
+	secondary_mode_only();
+
+	cpumask_and(&effective_mask, &xnsched_realtime_cpus, cpumask);
+	if (cpumask_empty(&effective_mask))
+		return -EINVAL;
+
+	return irq_set_affinity_hint(intr->irq, &effective_mask);
+}
+EXPORT_SYMBOL_GPL(xnintr_affinity);
+++ linux-patched/kernel/xenomai/pipeline/Makefile	2022-03-21 12:58:29.134891448 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:2 @
--- linux/kernel/xenomai/pipeline/syscall.c	1970-01-01 01:00:00.000000000 +0100
+ccflags-y += -I$(srctree)/kernel
+
+obj-y +=	pipeline.o
+
+pipeline-y :=	init.o kevents.o sched.o tick.o syscall.o intr.o
+++ linux-patched/kernel/xenomai/pipeline/syscall.c	2022-03-21 12:58:29.130891487 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/kernel/xenomai/pipeline/tick.c	1970-01-01 01:00:00.000000000 +0100
+/*
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright (C) 2005 Philippe Gerum <rpm@xenomai.org>
+ * Copyright (C) 2005 Gilles Chanteperdrix <gilles.chanteperdrix@xenomai.org>
+ */
+
+#include <linux/irqstage.h>
+#include <pipeline/pipeline.h>
+#include <pipeline/kevents.h>
+#include <cobalt/kernel/assert.h>
+#include <xenomai/posix/syscall.h>
+
+int handle_pipelined_syscall(struct irq_stage *stage, struct pt_regs *regs)
+{
+	if (unlikely(running_inband()))
+		return handle_root_syscall(regs);
+
+	return handle_head_syscall(stage == &inband_stage, regs);
+}
+
+int handle_oob_syscall(struct pt_regs *regs)
+{
+	return handle_head_syscall(false, regs);
+}
+++ linux-patched/kernel/xenomai/pipeline/tick.c	2022-03-21 12:58:29.127891516 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/kernel/xenomai/pipeline/kevents.c	1970-01-01 01:00:00.000000000 +0100
+/*
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright (C) 2001,2002,2003,2007,2012 Philippe Gerum <rpm@xenomai.org>.
+ * Copyright (C) 2004 Gilles Chanteperdrix <gilles.chanteperdrix@xenomai.org>
+ */
+
+#include <linux/tick.h>
+#include <linux/clockchips.h>
+#include <cobalt/kernel/intr.h>
+#include <pipeline/tick.h>
+#include <cobalt/kernel/sched.h>
+#include <cobalt/kernel/timer.h>
+
+static DEFINE_PER_CPU(struct clock_proxy_device *, proxy_device);
+
+const char *pipeline_timer_name(void)
+{
+	struct clock_proxy_device *dev = per_cpu(proxy_device, 0);
+	struct clock_event_device *real_dev = dev->real_device;
+
+	/*
+	 * Return the name of the current clock event chip, which is
+	 * the real device controlled by the proxy tick device.
+	 */
+	return real_dev->name;
+}
+
+void pipeline_set_timer_shot(unsigned long delay) /* ns */
+{
+	struct clock_proxy_device *dev = __this_cpu_read(proxy_device);
+	struct clock_event_device *real_dev = dev->real_device;
+	u64 cycles;
+	ktime_t t;
+	int ret;
+
+	if (real_dev->features & CLOCK_EVT_FEAT_KTIME) {
+		t = ktime_add(delay, xnclock_core_read_raw());
+		real_dev->set_next_ktime(t, real_dev);
+	} else {
+		if (delay <= 0) {
+			delay = real_dev->min_delta_ns;
+		} else {
+			delay = min_t(int64_t, delay,
+				real_dev->max_delta_ns);
+			delay = max_t(int64_t, delay,
+				real_dev->min_delta_ns);
+		}
+		cycles = ((u64)delay * real_dev->mult) >> real_dev->shift;
+		ret = real_dev->set_next_event(cycles, real_dev);
+		if (ret)
+			real_dev->set_next_event(real_dev->min_delta_ticks,
+						real_dev);
+	}
+}
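+/*
+ * On the non-KTIME path above, the mult/shift pair converts the
+ * clamped delay from nanoseconds to device cycles; e.g. assuming a
+ * calibration of one cycle per 100 ns, delay=50000 ns programs about
+ * 500 cycles into the real device.
+ */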
+
+static int proxy_set_next_ktime(ktime_t expires,
+				struct clock_event_device *proxy_dev) /* hard irqs on/off */
+{
+	struct xnsched *sched;
+	unsigned long flags;
+	ktime_t delta;
+	int ret;
+
+	/*
+	 * Expiration dates of in-band timers are based on the common
+	 * monotonic time base. If the timeout date has already
+	 * elapsed, make sure xntimer_start() does not fail with
+	 * -ETIMEDOUT but programs the hardware for ticking
+	 * immediately instead.
+	 */
+	delta = ktime_sub(expires, ktime_get());
+	if (delta < 0)
+		delta = 0;
+
+	xnlock_get_irqsave(&nklock, flags);
+	sched = xnsched_current();
+	ret = xntimer_start(&sched->htimer, delta, XN_INFINITE, XN_RELATIVE);
+	xnlock_put_irqrestore(&nklock, flags);
+
+	return ret ? -ETIME : 0;
+}
+
+bool pipeline_must_force_program_tick(struct xnsched *sched)
+{
+	return sched->lflags & XNTSTOP;
+}
+
+static int proxy_set_oneshot_stopped(struct clock_event_device *proxy_dev)
+{
+	struct clock_event_device *real_dev;
+	struct clock_proxy_device *dev;
+	struct xnsched *sched;
+	spl_t s;
+
+	dev = container_of(proxy_dev, struct clock_proxy_device, proxy_device);
+
+	/*
+	 * In-band wants to disable the clock hardware on entering a
+	 * tickless state, so we have to stop our in-band tick
+	 * emulation. Propagate the request for shutting down the
+	 * hardware to the real device only if we have no outstanding
+	 * OOB timers. CAUTION: the in-band timer is counted when
+	 * assessing the XNIDLE condition, so we need to stop it
+	 * prior to testing the latter.
+	 */
+	xnlock_get_irqsave(&nklock, s);
+	sched = xnsched_current();
+	xntimer_stop(&sched->htimer);
+	sched->lflags |= XNTSTOP;
+
+	if (sched->lflags & XNIDLE) {
+		real_dev = dev->real_device;
+		real_dev->set_state_oneshot_stopped(real_dev);
+	}
+
+	xnlock_put_irqrestore(&nklock, s);
+
+	return 0;
+}
+
+static void setup_proxy(struct clock_proxy_device *dev)
+{
+	struct clock_event_device *proxy_dev = &dev->proxy_device;
+
+	dev->handle_oob_event = (typeof(dev->handle_oob_event))
+		xnintr_core_clock_handler;
+	proxy_dev->features |= CLOCK_EVT_FEAT_KTIME;
+	proxy_dev->set_next_ktime = proxy_set_next_ktime;
+	if (proxy_dev->set_state_oneshot_stopped)
+		proxy_dev->set_state_oneshot_stopped = proxy_set_oneshot_stopped;
+	__this_cpu_write(proxy_device, dev);
+}
+
+#ifdef CONFIG_SMP
+static irqreturn_t tick_ipi_handler(int irq, void *dev_id)
+{
+	xnintr_core_clock_handler();
+
+	return IRQ_HANDLED;
+}
+#endif
+
+int pipeline_install_tick_proxy(void)
+{
+	int ret;
+
+#ifdef CONFIG_SMP
+	/*
+	 * We may be running a SMP kernel on a uniprocessor machine
+	 * whose interrupt controller provides no IPI: attempt to hook
+	 * the timer IPI only if the hardware can support multiple
+	 * CPUs.
+	 */
+	if (num_possible_cpus() > 1) {
+		ret = __request_percpu_irq(TIMER_OOB_IPI,
+					tick_ipi_handler,
+					IRQF_OOB, "Xenomai timer IPI",
+					&cobalt_machine_cpudata);
+		if (ret)
+			return ret;
+	}
+#endif
+
+	/* Install the proxy tick device */
+	ret = tick_install_proxy(setup_proxy, &xnsched_realtime_cpus);
+	if (ret)
+		goto fail_proxy;
+
+	return 0;
+
+fail_proxy:
+#ifdef CONFIG_SMP
+	if (num_possible_cpus() > 1)
+		free_percpu_irq(TIMER_OOB_IPI, &cobalt_machine_cpudata);
+#endif
+
+	return ret;
+}
+
+void pipeline_uninstall_tick_proxy(void)
+{
+	/* Uninstall the proxy tick device. */
+	tick_uninstall_proxy(&xnsched_realtime_cpus);
+
+#ifdef CONFIG_SMP
+	if (num_possible_cpus() > 1)
+		free_percpu_irq(TIMER_OOB_IPI, &cobalt_machine_cpudata);
+#endif
+}
+++ linux-patched/kernel/xenomai/pipeline/kevents.c	2022-03-21 12:58:29.123891555 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/kernel/xenomai/rtdm/fd.c	1970-01-01 01:00:00.000000000 +0100
+/*
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright (C) 2001-2014 Philippe Gerum <rpm@xenomai.org>.
+ * Copyright (C) 2001-2014 The Xenomai project <http://www.xenomai.org>
+ * Copyright (C) 2006 Gilles Chanteperdrix <gilles.chanteperdrix@xenomai.org>
+ *
+ * SMP support Copyright (C) 2004 The HYADES project <http://www.hyades-itea.org>
+ * RTAI/fusion Copyright (C) 2004 The RTAI project <http://www.rtai.org>
+ */
+
+#include <linux/ptrace.h>
+#include <pipeline/kevents.h>
+#include <cobalt/kernel/sched.h>
+#include <cobalt/kernel/thread.h>
+#include <cobalt/kernel/clock.h>
+#include <cobalt/kernel/vdso.h>
+#include <cobalt/kernel/init.h>
+#include <rtdm/driver.h>
+#include <trace/events/cobalt-core.h>
+#include "../posix/process.h"
+#include "../posix/thread.h"
+#include "../posix/memory.h"
+
+void arch_inband_task_init(struct task_struct *tsk)
+{
+	struct cobalt_threadinfo *p = dovetail_task_state(tsk);
+
+	p->thread = NULL;
+	p->process = NULL;
+}
+
+void handle_oob_trap_entry(unsigned int trapnr, struct pt_regs *regs)
+{
+	struct xnthread *thread;
+	struct xnsched *sched;
+	spl_t s;
+
+	sched = xnsched_current();
+	thread = sched->curr;
+
+	/*
+	 * Enable back tracing.
+	 */
+	trace_cobalt_thread_fault(xnarch_fault_pc(regs), trapnr);
+
+	if (xnthread_test_state(thread, XNROOT))
+		return;
+
+	if (xnarch_fault_bp_p(trapnr) && user_mode(regs)) {
+		XENO_WARN_ON(CORE, xnthread_test_state(thread, XNRELAX));
+		xnlock_get_irqsave(&nklock, s);
+		xnthread_set_info(thread, XNCONTHI);
+		dovetail_request_ucall(current);
+		cobalt_stop_debugged_process(thread);
+		xnlock_put_irqrestore(&nklock, s);
+		xnsched_run();
+	}
+
+	/*
+	 * If we experienced a trap on behalf of a shadow thread
+	 * running in primary mode, move it to the Linux domain,
+	 * letting the regular kernel process the exception.
+	 */
+#if defined(CONFIG_XENO_OPT_DEBUG_COBALT) || defined(CONFIG_XENO_OPT_DEBUG_USER)
+	if (!user_mode(regs)) {
+		xntrace_panic_freeze();
+		printk(XENO_WARNING
+		       "switching %s to secondary mode after exception #%u in "
+		       "kernel-space at 0x%lx (pid %d)\n", thread->name,
+		       trapnr,
+		       xnarch_fault_pc(regs),
+		       xnthread_host_pid(thread));
+		xntrace_panic_dump();
+	} else if (xnarch_fault_notify(trapnr)) /* Don't report debug traps */
+		printk(XENO_WARNING
+		       "switching %s to secondary mode after exception #%u from "
+		       "user-space at 0x%lx (pid %d)\n", thread->name,
+		       trapnr,
+		       xnarch_fault_pc(regs),
+		       xnthread_host_pid(thread));
+#endif
+
+	if (xnarch_fault_pf_p(trapnr))
+		/*
+		 * The page fault counter is not SMP-safe, but it's a
+		 * simple indicator that something went wrong wrt
+		 * memory locking anyway.
+		 */
+		xnstat_counter_inc(&thread->stat.pf);
+
+	xnthread_relax(xnarch_fault_notify(trapnr), SIGDEBUG_MIGRATE_FAULT);
+}
+
+static inline int handle_setaffinity_event(struct dovetail_migration_data *d)
+{
+	return cobalt_handle_setaffinity_event(d->task);
+}
+
+static inline int handle_taskexit_event(struct task_struct *p)
+{
+	return cobalt_handle_taskexit_event(p);
+}
+
+static inline int handle_user_return(struct task_struct *task)
+{
+	return cobalt_handle_user_return(task);
+}
+
+void handle_oob_mayday(struct pt_regs *regs)
+{
+	XENO_BUG_ON(COBALT, !xnthread_test_state(xnthread_current(), XNUSER));
+
+	xnthread_relax(0, 0);
+}
+
+static int handle_sigwake_event(struct task_struct *p)
+{
+	struct xnthread *thread;
+	sigset_t pending;
+	spl_t s;
+
+	thread = xnthread_from_task(p);
+	if (thread == NULL)
+		return KEVENT_PROPAGATE;
+
+	xnlock_get_irqsave(&nklock, s);
+
+	/*
+	 * CAUTION: __TASK_TRACED is not set in p->state yet. This
+	 * state bit will be set right after we return, when the task
+	 * is woken up.
+	 */
+	if ((p->ptrace & PT_PTRACED) && !xnthread_test_state(thread, XNSSTEP)) {
+		/* We already own the siglock. */
+		sigorsets(&pending,
+			  &p->pending.signal,
+			  &p->signal->shared_pending.signal);
+
+		if (sigismember(&pending, SIGTRAP) ||
+		    sigismember(&pending, SIGSTOP)
+		    || sigismember(&pending, SIGINT))
+			cobalt_register_debugged_thread(thread);
+	}
+
+	if (xnthread_test_state(thread, XNRELAX))
+		goto out;
+
+	/*
+	 * Allow a thread stopped for debugging to resume briefly in order to
+	 * migrate to secondary mode. xnthread_relax will reapply XNDBGSTOP.
+	 */
+	if (xnthread_test_state(thread, XNDBGSTOP))
+		xnthread_resume(thread, XNDBGSTOP);
+
+	__xnthread_kick(thread);
+out:
+	xnsched_run();
+
+	xnlock_put_irqrestore(&nklock, s);
+
+	return KEVENT_PROPAGATE;
+}
+
+static inline int handle_cleanup_event(struct mm_struct *mm)
+{
+	return cobalt_handle_cleanup_event(mm);
+}
+
+void pipeline_cleanup_process(void)
+{
+	dovetail_stop_altsched();
+}
+
+int handle_ptrace_resume(struct task_struct *tracee)
+{
+	struct xnthread *thread;
+	spl_t s;
+
+	thread = xnthread_from_task(tracee);
+	if (thread == NULL)
+		return KEVENT_PROPAGATE;
+
+	if (xnthread_test_state(thread, XNSSTEP)) {
+		xnlock_get_irqsave(&nklock, s);
+
+		xnthread_resume(thread, XNDBGSTOP);
+		cobalt_unregister_debugged_thread(thread);
+
+		xnlock_put_irqrestore(&nklock, s);
+	}
+
+	return KEVENT_PROPAGATE;
+}
+
+static void handle_ptrace_cont(void)
+{
+	struct xnthread *curr = xnthread_current();
+	spl_t s;
+
+	xnlock_get_irqsave(&nklock, s);
+
+	if (xnthread_test_state(curr, XNSSTEP)) {
+		if (!xnthread_test_info(curr, XNCONTHI))
+			cobalt_unregister_debugged_thread(curr);
+
+		xnthread_set_localinfo(curr, XNHICCUP);
+
+		dovetail_request_ucall(current);
+	}
+
+	xnlock_put_irqrestore(&nklock, s);
+}
+
+void handle_inband_event(enum inband_event_type event, void *data)
+{
+	switch (event) {
+	case INBAND_TASK_SIGNAL:
+		handle_sigwake_event(data);
+		break;
+	case INBAND_TASK_MIGRATION:
+		handle_setaffinity_event(data);
+		break;
+	case INBAND_TASK_EXIT:
+		if (xnthread_current())
+			handle_taskexit_event(current);
+		break;
+	case INBAND_TASK_RETUSER:
+		handle_user_return(data);
+		break;
+	case INBAND_TASK_PTSTEP:
+		handle_ptrace_resume(data);
+		break;
+	case INBAND_TASK_PTCONT:
+		handle_ptrace_cont();
+		break;
+	case INBAND_TASK_PTSTOP:
+		break;
+	case INBAND_PROCESS_CLEANUP:
+		handle_cleanup_event(data);
+		break;
+	}
+}
+
+/*
+ * Called by the in-band kernel when the CLOCK_REALTIME epoch changes.
+ */
+void inband_clock_was_set(void)
+{
+	if (realtime_core_enabled())
+		xnclock_set_wallclock(ktime_get_real_fast_ns());
+}
+
+#ifdef CONFIG_MMU
+
+int pipeline_prepare_current(void)
+{
+	struct task_struct *p = current;
+	kernel_siginfo_t si;
+
+	if ((p->mm->def_flags & VM_LOCKED) == 0) {
+		memset(&si, 0, sizeof(si));
+		si.si_signo = SIGDEBUG;
+		si.si_code = SI_QUEUE;
+		si.si_int = SIGDEBUG_NOMLOCK | sigdebug_marker;
+		send_sig_info(SIGDEBUG, &si, p);
+	}
+
+	return 0;
+}
+
+static inline int get_mayday_prot(void)
+{
+	return PROT_READ|PROT_EXEC;
+}
+
+#else /* !CONFIG_MMU */
+
+int pipeline_prepare_current(void)
+{
+	return 0;
+}
+
+static inline int get_mayday_prot(void)
+{
+	/*
+	 * Until we stop backing /dev/mem with the mayday page, we
+	 * can't ask for PROT_EXEC since the former does not define
+	 * mmap capabilities, and default ones won't allow an
+	 * executable mapping with MAP_SHARED. In the NOMMU case, this
+	 * is (currently) not an issue.
+	 */
+	return PROT_READ;
+}
+
+#endif /* !CONFIG_MMU */
+
+void resume_oob_task(struct task_struct *p) /* inband, oob stage stalled */
+{
+	struct xnthread *thread = xnthread_from_task(p);
+
+	xnlock_get(&nklock);
+
+	/*
+	 * We fire the handler before the thread is migrated, so that
+	 * thread->sched does not change between paired invocations of
+	 * relax_thread/harden_thread handlers.
+	 */
+	xnthread_run_handler_stack(thread, harden_thread);
+
+	cobalt_adjust_affinity(p);
+
+	xnthread_resume(thread, XNRELAX);
+
+	/*
+	 * In case we migrated independently of the user return notifier, clear
+	 * XNCONTHI here and also disable the notifier - we are already done.
+	 */
+	if (unlikely(xnthread_test_info(thread, XNCONTHI))) {
+		xnthread_clear_info(thread, XNCONTHI);
+		dovetail_clear_ucall();
+	}
+
+	/* Unregister as debugged thread in case we postponed this. */
+	if (unlikely(xnthread_test_state(thread, XNSSTEP)))
+		cobalt_unregister_debugged_thread(thread);
+
+	xnlock_put(&nklock);
+
+	xnsched_run();
+}
+
+void pipeline_attach_current(struct xnthread *thread)
+{
+	struct cobalt_threadinfo *p;
+
+	p = pipeline_current();
+	p->thread = thread;
+	p->process = cobalt_search_process(current->mm);
+	dovetail_init_altsched(&xnthread_archtcb(thread)->altsched);
+}
+
+int pipeline_trap_kevents(void)
+{
+	dovetail_start();
+	return 0;
+}
+
+void pipeline_enable_kevents(void)
+{
+	dovetail_start_altsched();
+}
+++ linux-patched/kernel/xenomai/rtdm/fd.c	2022-03-21 12:58:29.118891604 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/kernel/xenomai/rtdm/internal.h	1970-01-01 01:00:00.000000000 +0100
+/*
+ * Copyright (C) 2005 Jan Kiszka <jan.kiszka@web.de>
+ * Copyright (C) 2005 Joerg Langenberg <joerg.langenberg@gmx.net>
+ * Copyright (C) 2013,2014 Gilles Chanteperdrix <gch@xenomai.org>.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+#include <linux/list.h>
+#include <linux/err.h>
+#include <linux/slab.h>
+#include <linux/sched.h>
+#include <linux/mm.h>
+#include <linux/poll.h>
+#include <linux/kthread.h>
+#include <linux/fdtable.h>
+#include <cobalt/kernel/registry.h>
+#include <cobalt/kernel/lock.h>
+#include <cobalt/kernel/ppd.h>
+#include <cobalt/kernel/time.h>
+#include <pipeline/inband_work.h>
+#include <trace/events/cobalt-rtdm.h>
+#include <rtdm/fd.h>
+#include "internal.h"
+#include "posix/process.h"
+#include "posix/syscall.h"
+#include "posix/clock.h"
+
+#define RTDM_SETFL_MASK (O_NONBLOCK)
+
+DEFINE_PRIVATE_XNLOCK(fdtree_lock);
+static LIST_HEAD(rtdm_fd_cleanup_queue);
+static struct semaphore rtdm_fd_cleanup_sem;
+
+struct rtdm_fd_index {
+	struct xnid id;
+	struct rtdm_fd *fd;
+};
+
+static int enosys(void)
+{
+	return -ENOSYS;
+}
+
+static int enotty(void)
+{
+	return -ENOTTY;
+}
+
+static int ebadf(void)
+{
+	return -EBADF;
+}
+
+static int enodev(void)
+{
+	return -ENODEV;
+}
+
+static inline struct rtdm_fd_index *
+fetch_fd_index(struct cobalt_ppd *p, int ufd)
+{
+	struct xnid *id = xnid_fetch(&p->fds, ufd);
+	if (id == NULL)
+		return NULL;
+
+	return container_of(id, struct rtdm_fd_index, id);
+}
+
+static struct rtdm_fd *fetch_fd(struct cobalt_ppd *p, int ufd)
+{
+	struct rtdm_fd_index *idx = fetch_fd_index(p, ufd);
+	if (idx == NULL)
+		return NULL;
+
+	return idx->fd;
+}
+
+#define assign_invalid_handler(__handler, __invalid)			\
+	do								\
+		(__handler) = (typeof(__handler))__invalid;		\
+	while (0)
+
+/* Calling this handler should beget ENOSYS if not implemented. */
+#define assign_switch_handler(__handler)				\
+	do								\
+		if ((__handler) == NULL)				\
+			assign_invalid_handler(__handler, enosys);	\
+	while (0)
+
+#define assign_default_handler(__handler, __invalid)			\
+	do								\
+		if ((__handler) == NULL)				\
+			assign_invalid_handler(__handler, __invalid);	\
+	while (0)
+
+#define __rt(__handler)		__handler ## _rt
+#define __nrt(__handler)	__handler ## _nrt
+
+/*
+ * If neither of the dual handlers is implemented, install the given
+ * invalid-call placeholder for both. Otherwise, assign the enosys
+ * placeholder to any NULL handler, so that calling it triggers the
+ * adaptive switch.
+ */
+#define assign_default_dual_handlers(__handler, __invalid_handler)	\
+	do								\
+		if (__rt(__handler) || __nrt(__handler)) {		\
+			assign_switch_handler(__rt(__handler));		\
+			assign_switch_handler(__nrt(__handler));	\
+		} else {						\
+			assign_invalid_handler(__rt(__handler),		\
+					       __invalid_handler);	\
+			assign_invalid_handler(__nrt(__handler),	\
+					       __invalid_handler);	\
+		}							\
+	while (0)
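+
+/*
+ * How these defaults play out, as a minimal sketch assuming a
+ * hypothetical driver which only implements the non-real-time side of
+ * its ioctl handler, e.g.:
+ *
+ *	static struct rtdm_fd_ops foo_fd_ops = {
+ *		.ioctl_nrt = foo_ioctl_nrt,
+ *	};
+ *
+ * The NULL ->ioctl_rt slot receives the enosys placeholder, so a
+ * primary mode caller gets -ENOSYS on the first attempt and is
+ * switched to secondary mode, where ->ioctl_nrt eventually runs.
+ */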
+
+#ifdef CONFIG_XENO_ARCH_SYS3264
+
+static inline void set_compat_bit(struct rtdm_fd *fd)
+{
+	struct pt_regs *regs;
+
+	if (cobalt_ppd_get(0) == &cobalt_kernel_ppd)
+		fd->compat = 0;
+	else {
+		regs = task_pt_regs(current);
+		XENO_BUG_ON(COBALT, !__xn_syscall_p(regs));
+		fd->compat = __COBALT_CALL_COMPAT(__xn_reg_sys(regs));
+	}
+}
+
+#else	/* !CONFIG_XENO_ARCH_SYS3264 */
+
+static inline void set_compat_bit(struct rtdm_fd *fd)
+{
+}
+
+#endif	/* !CONFIG_XENO_ARCH_SYS3264 */
+
+int rtdm_fd_enter(struct rtdm_fd *fd, int ufd, unsigned int magic,
+		  struct rtdm_fd_ops *ops)
+{
+	struct cobalt_ppd *ppd;
+
+	secondary_mode_only();
+
+	if (magic == 0)
+		return -EINVAL;
+
+	assign_default_dual_handlers(ops->ioctl, enotty);
+	assign_default_dual_handlers(ops->read, ebadf);
+	assign_default_dual_handlers(ops->write, ebadf);
+	assign_default_dual_handlers(ops->recvmsg, ebadf);
+	assign_default_dual_handlers(ops->sendmsg, ebadf);
+	assign_default_handler(ops->select, ebadf);
+	assign_default_handler(ops->mmap, enodev);
+
+	ppd = cobalt_ppd_get(0);
+	fd->magic = magic;
+	fd->ops = ops;
+	fd->owner = ppd;
+	fd->ufd = ufd;
+	fd->refs = 1;
+	fd->stale = false;
+	set_compat_bit(fd);
+	INIT_LIST_HEAD(&fd->next);
+
+	return 0;
+}
+
+int rtdm_fd_register(struct rtdm_fd *fd, int ufd)
+{
+	struct rtdm_fd_index *idx;
+	struct cobalt_ppd *ppd;
+	spl_t s;
+	int ret = 0;
+
+	ppd = cobalt_ppd_get(0);
+	idx = kmalloc(sizeof(*idx), GFP_KERNEL);
+	if (idx == NULL)
+		return -ENOMEM;
+
+	idx->fd = fd;
+
+	xnlock_get_irqsave(&fdtree_lock, s);
+	ret = xnid_enter(&ppd->fds, &idx->id, ufd);
+	xnlock_put_irqrestore(&fdtree_lock, s);
+	if (ret < 0) {
+		kfree(idx);
+		ret = -EBUSY;
+	}
+
+	return ret;
+}
+
+int rtdm_device_new_fd(struct rtdm_fd *fd, int ufd,
+			struct rtdm_device *device)
+{
+	spl_t s;
+	int ret;
+
+	ret = rtdm_fd_register(fd, ufd);
+	if (ret < 0)
+		return ret;
+
+	trace_cobalt_fd_created(fd, ufd);
+	xnlock_get_irqsave(&fdtree_lock, s);
+	list_add(&fd->next, &device->openfd_list);
+	xnlock_put_irqrestore(&fdtree_lock, s);
+
+	return 0;
+}
+
+/**
+ * @brief Retrieve and lock a RTDM file descriptor
+ *
+ * @param[in] ufd User-side file descriptor
+ * @param[in] magic Magic word for lookup validation
+ *
+ * @return Pointer to the RTDM file descriptor matching @a
+ * ufd. Otherwise:
+ *
+ * - ERR_PTR(-EADV) if the user-space handle is either invalid or not
+ * managed by RTDM.
+ *
+ * - ERR_PTR(-EBADF) if the underlying device is being torn down at
+ * the time of the call.
+ *
+ * @note The file descriptor returned must be later released by a call
+ * to rtdm_fd_put().
+ *
+ * @coretags{unrestricted}
+ */
+struct rtdm_fd *rtdm_fd_get(int ufd, unsigned int magic)
+{
+	struct cobalt_ppd *p = cobalt_ppd_get(0);
+	struct rtdm_fd *fd;
+	spl_t s;
+
+	xnlock_get_irqsave(&fdtree_lock, s);
+	fd = fetch_fd(p, ufd);
+	if (fd == NULL || (magic != 0 && fd->magic != magic)) {
+		fd = ERR_PTR(-EADV);
+		goto out;
+	}
+
+	if (fd->stale) {
+		fd = ERR_PTR(-EBADF);
+		goto out;
+	}
+
+	++fd->refs;
+out:
+	xnlock_put_irqrestore(&fdtree_lock, s);
+
+	return fd;
+}
+EXPORT_SYMBOL_GPL(rtdm_fd_get);
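+
+/*
+ * Typical lookup pattern, as a minimal sketch assuming a hypothetical
+ * FOO_MAGIC class value: each successful rtdm_fd_get() is paired with
+ * a call to rtdm_fd_put() once the descriptor is no longer needed.
+ *
+ *	struct rtdm_fd *fd;
+ *
+ *	fd = rtdm_fd_get(ufd, FOO_MAGIC);
+ *	if (IS_ERR(fd))
+ *		return PTR_ERR(fd);
+ *	... operate on fd ...
+ *	rtdm_fd_put(fd);
+ */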
+
+static int fd_cleanup_thread(void *data)
+{
+	struct rtdm_fd *fd;
+	int err;
+	spl_t s;
+
+	for (;;) {
+		set_cpus_allowed_ptr(current, cpu_online_mask);
+
+		do {
+			err = down_interruptible(&rtdm_fd_cleanup_sem);
+			if (kthread_should_stop())
+				return 0;
+		} while (err);
+
+		xnlock_get_irqsave(&fdtree_lock, s);
+		fd = list_first_entry(&rtdm_fd_cleanup_queue,
+				struct rtdm_fd, cleanup);
+		list_del(&fd->cleanup);
+		xnlock_put_irqrestore(&fdtree_lock, s);
+
+		fd->ops->close(fd);
+	}
+
+	return 0;
+}
+
+static void lostage_trigger_close(struct pipeline_inband_work *inband_work)
+{
+	up(&rtdm_fd_cleanup_sem);
+}
+
+static struct lostage_trigger_close {
+	struct pipeline_inband_work inband_work; /* Must be first. */
+} fd_closework = {
+	.inband_work = PIPELINE_INBAND_WORK_INITIALIZER(fd_closework,
+						lostage_trigger_close),
+};
+
+static void __put_fd(struct rtdm_fd *fd, spl_t s)
+{
+	bool destroy, trigger;
+
+	XENO_WARN_ON(COBALT, fd->refs <= 0);
+	destroy = --fd->refs == 0;
+	if (destroy && !list_empty(&fd->next))
+		list_del_init(&fd->next);
+
+	xnlock_put_irqrestore(&fdtree_lock, s);
+
+	if (!destroy)
+		return;
+
+	if (is_secondary_domain())
+		fd->ops->close(fd);
+	else {
+		xnlock_get_irqsave(&fdtree_lock, s);
+		trigger = list_empty(&rtdm_fd_cleanup_queue);
+		list_add_tail(&fd->cleanup, &rtdm_fd_cleanup_queue);
+		xnlock_put_irqrestore(&fdtree_lock, s);
+
+		if (trigger)
+			pipeline_post_inband_work(&fd_closework);
+	}
+}
+
+void rtdm_device_flush_fds(struct rtdm_device *dev)
+{
+	struct rtdm_driver *drv = dev->driver;
+	struct rtdm_fd *fd;
+	spl_t s;
+
+	xnlock_get_irqsave(&fdtree_lock, s);
+
+	while (!list_empty(&dev->openfd_list)) {
+		fd = list_get_entry_init(&dev->openfd_list, struct rtdm_fd, next);
+		fd->stale = true;
+		if (drv->ops.close) {
+			rtdm_fd_get_light(fd);
+			xnlock_put_irqrestore(&fdtree_lock, s);
+			drv->ops.close(fd);
+			rtdm_fd_put(fd);
+			xnlock_get_irqsave(&fdtree_lock, s);
+		}
+	}
+
+	xnlock_put_irqrestore(&fdtree_lock, s);
+}
+
+/**
+ * @brief Release a RTDM file descriptor obtained via rtdm_fd_get()
+ *
+ * @param[in] fd RTDM file descriptor to release
+ *
+ * @note Every call to rtdm_fd_get() must be matched by a call to
+ * rtdm_fd_put().
+ *
+ * @coretags{unrestricted}
+ */
+void rtdm_fd_put(struct rtdm_fd *fd)
+{
+	spl_t s;
+
+	xnlock_get_irqsave(&fdtree_lock, s);
+	__put_fd(fd, s);
+}
+EXPORT_SYMBOL_GPL(rtdm_fd_put);
+
+/**
+ * @brief Hold a reference on a RTDM file descriptor
+ *
+ * @param[in] fd Target file descriptor
+ *
+ * @note rtdm_fd_lock() increments the reference counter of @a fd. You
+ * only need to call this function in special scenarios, e.g. when
+ * keeping additional references to the file descriptor that have
+ * different lifetimes. Only use rtdm_fd_lock() on descriptors that
+ * are currently locked via an earlier rtdm_fd_get()/rtdm_fd_lock() or
+ * while running a device operation handler.
+ *
+ * @coretags{unrestricted}
+ */
+int rtdm_fd_lock(struct rtdm_fd *fd)
+{
+	spl_t s;
+
+	xnlock_get_irqsave(&fdtree_lock, s);
+	if (fd->refs == 0) {
+		xnlock_put_irqrestore(&fdtree_lock, s);
+		return -EIDRM;
+	}
+	++fd->refs;
+	xnlock_put_irqrestore(&fdtree_lock, s);
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(rtdm_fd_lock);
+
+/**
+ * @brief Drop a reference on a RTDM file descriptor
+ *
+ * @param[in] fd Target file descriptor
+ *
+ * @note Every call to rtdm_fd_lock() must be matched by a call to
+ * rtdm_fd_unlock().
+ *
+ * @coretags{unrestricted}
+ */
+void rtdm_fd_unlock(struct rtdm_fd *fd)
+{
+	spl_t s;
+
+	xnlock_get_irqsave(&fdtree_lock, s);
+	__put_fd(fd, s);
+}
+EXPORT_SYMBOL_GPL(rtdm_fd_unlock);
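+
+/*
+ * Intended pairing, as a minimal sketch assuming a hypothetical driver
+ * which caches a descriptor reference in private data with its own
+ * lifetime: the extra reference taken with rtdm_fd_lock() keeps the
+ * ->close() handler from running until rtdm_fd_unlock() drops it.
+ *
+ *	ret = rtdm_fd_lock(fd);
+ *	if (ret)
+ *		return ret;
+ *	foo->cached_fd = fd;
+ *	...
+ *	rtdm_fd_unlock(foo->cached_fd);
+ */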
+
+int rtdm_fd_fcntl(int ufd, int cmd, ...)
+{
+	struct rtdm_fd *fd;
+	va_list ap;
+	long arg;
+	int ret;
+
+	fd = rtdm_fd_get(ufd, 0);
+	if (IS_ERR(fd))
+		return PTR_ERR(fd);
+
+	va_start(ap, cmd);
+	arg = va_arg(ap, long);
+	va_end(ap);
+
+	switch (cmd) {
+	case F_GETFL:
+		ret = fd->oflags;
+		break;
+	case F_SETFL:
+		fd->oflags = (fd->oflags & ~RTDM_SETFL_MASK) |
+			(arg & RTDM_SETFL_MASK);
+		ret = 0;
+		break;
+	default:
+		ret = -EINVAL;
+	}
+
+	rtdm_fd_put(fd);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(rtdm_fd_fcntl);
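+
+/*
+ * Minimal usage sketch: only the bits covered by RTDM_SETFL_MASK
+ * (currently O_NONBLOCK) are honored by F_SETFL, so switching a
+ * descriptor to non-blocking mode boils down to:
+ *
+ *	int flags = rtdm_fd_fcntl(ufd, F_GETFL);
+ *
+ *	if (flags >= 0)
+ *		flags = rtdm_fd_fcntl(ufd, F_SETFL, flags | O_NONBLOCK);
+ */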
+
+static struct rtdm_fd *get_fd_fixup_mode(int ufd)
+{
+	struct xnthread *thread;
+	struct rtdm_fd *fd;
+
+	fd = rtdm_fd_get(ufd, 0);
+	if (IS_ERR(fd))
+		return fd;
+
+	/*
+	 * Mode is selected according to the following convention:
+	 *
+	 * - Cobalt threads must try running the syscall from primary
+	 * mode as a first attempt, regardless of their scheduling
+	 * class. The driver handler may ask for demoting the caller
+	 * to secondary mode by returning -ENOSYS.
+	 *
+	 * - Regular threads (i.e. not bound to Cobalt) may only run
+	 * the syscall from secondary mode.
+	 */
+	thread = xnthread_current();
+	if (unlikely(is_secondary_domain())) {
+		if (thread == NULL ||
+		    xnthread_test_localinfo(thread, XNDESCENT))
+			return fd;
+	} else if (likely(thread))
+		return fd;
+
+	/*
+	 * We need to switch to the converse mode. Since all callers
+	 * bear the "adaptive" tag, we just pass -ENOSYS back to the
+	 * syscall dispatcher to get switched to the next mode.
+	 */
+	rtdm_fd_put(fd);
+
+	return ERR_PTR(-ENOSYS);
+}
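+
+/*
+ * The adaptive switch seen from a driver, as a minimal sketch assuming
+ * a hypothetical dual-kernel ioctl handler and request code
+ * FOO_SLOW_CONFIG: returning -ENOSYS from the real-time side makes the
+ * syscall dispatcher restart the request from secondary mode, where
+ * the _nrt variant runs instead.
+ *
+ *	static int foo_ioctl_rt(struct rtdm_fd *fd, unsigned int request,
+ *				void __user *arg)
+ *	{
+ *		if (request == FOO_SLOW_CONFIG)
+ *			return -ENOSYS;
+ *		...
+ *	}
+ */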
+
+int rtdm_fd_ioctl(int ufd, unsigned int request, ...)
+{
+	struct rtdm_fd *fd;
+	void __user *arg;
+	va_list args;
+	int err, ret;
+
+	fd = get_fd_fixup_mode(ufd);
+	if (IS_ERR(fd)) {
+		err = PTR_ERR(fd);
+		goto out;
+	}
+
+	va_start(args, request);
+	arg = va_arg(args, void __user *);
+	va_end(args);
+
+	set_compat_bit(fd);
+
+	trace_cobalt_fd_ioctl(current, fd, ufd, request);
+
+	if (is_secondary_domain())
+		err = fd->ops->ioctl_nrt(fd, request, arg);
+	else
+		err = fd->ops->ioctl_rt(fd, request, arg);
+
+	if (!XENO_ASSERT(COBALT, !spltest()))
+		splnone();
+
+	if (err < 0) {
+		ret = __rtdm_dev_ioctl_core(fd, request, arg);
+		if (ret != -EADV)
+			err = ret;
+	}
+
+	rtdm_fd_put(fd);
+  out:
+	if (err < 0)
+		trace_cobalt_fd_ioctl_status(current, fd, ufd, err);
+
+	return err;
+}
+EXPORT_SYMBOL_GPL(rtdm_fd_ioctl);
+
+ssize_t
+rtdm_fd_read(int ufd, void __user *buf, size_t size)
+{
+	struct rtdm_fd *fd;
+	ssize_t ret;
+
+	fd = get_fd_fixup_mode(ufd);
+	if (IS_ERR(fd)) {
+		ret = PTR_ERR(fd);
+		goto out;
+	}
+
+	set_compat_bit(fd);
+
+	trace_cobalt_fd_read(current, fd, ufd, size);
+
+	if (is_secondary_domain())
+		ret = fd->ops->read_nrt(fd, buf, size);
+	else
+		ret = fd->ops->read_rt(fd, buf, size);
+
+	if (!XENO_ASSERT(COBALT, !spltest()))
+		splnone();
+
+	rtdm_fd_put(fd);
+
+  out:
+	if (ret < 0)
+		trace_cobalt_fd_read_status(current, fd, ufd, ret);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(rtdm_fd_read);
+
+ssize_t rtdm_fd_write(int ufd, const void __user *buf, size_t size)
+{
+	struct rtdm_fd *fd;
+	ssize_t ret;
+
+	fd = get_fd_fixup_mode(ufd);
+	if (IS_ERR(fd)) {
+		ret = PTR_ERR(fd);
+		goto out;
+	}
+
+	set_compat_bit(fd);
+
+	trace_cobalt_fd_write(current, fd, ufd, size);
+
+	if (is_secondary_domain())
+		ret = fd->ops->write_nrt(fd, buf, size);
+	else
+		ret = fd->ops->write_rt(fd, buf, size);
+
+	if (!XENO_ASSERT(COBALT, !spltest()))
+		splnone();
+
+	rtdm_fd_put(fd);
+
+  out:
+	if (ret < 0)
+		trace_cobalt_fd_write_status(current, fd, ufd, ret);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(rtdm_fd_write);
+
+ssize_t rtdm_fd_recvmsg(int ufd, struct user_msghdr *msg, int flags)
+{
+	struct rtdm_fd *fd;
+	ssize_t ret;
+
+	fd = get_fd_fixup_mode(ufd);
+	if (IS_ERR(fd)) {
+		ret = PTR_ERR(fd);
+		goto out;
+	}
+
+	set_compat_bit(fd);
+
+	trace_cobalt_fd_recvmsg(current, fd, ufd, flags);
+
+	if (fd->oflags & O_NONBLOCK)
+		flags |= MSG_DONTWAIT;
+
+	if (is_secondary_domain())
+		ret = fd->ops->recvmsg_nrt(fd, msg, flags);
+	else
+		ret = fd->ops->recvmsg_rt(fd, msg, flags);
+
+	if (!XENO_ASSERT(COBALT, !spltest()))
+		splnone();
+
+	rtdm_fd_put(fd);
+out:
+	if (ret < 0)
+		trace_cobalt_fd_recvmsg_status(current, fd, ufd, ret);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(rtdm_fd_recvmsg);
+
+struct cobalt_recvmmsg_timer {
+	struct xntimer timer;
+	struct xnthread *waiter;
+};
+
+static void recvmmsg_timeout_handler(struct xntimer *timer)
+{
+	struct cobalt_recvmmsg_timer *rq;
+
+	rq = container_of(timer, struct cobalt_recvmmsg_timer, timer);
+	xnthread_set_info(rq->waiter, XNTIMEO);
+	xnthread_resume(rq->waiter, XNDELAY);
+}
+
+int __rtdm_fd_recvmmsg(int ufd, void __user *u_msgvec, unsigned int vlen,
+		       unsigned int flags, void __user *u_timeout,
+		       int (*get_mmsg)(struct mmsghdr *mmsg, void __user *u_mmsg),
+		       int (*put_mmsg)(void __user **u_mmsg_p, const struct mmsghdr *mmsg),
+		       int (*get_timespec)(struct timespec64 *ts, const void __user *u_ts))
+{
+	struct cobalt_recvmmsg_timer rq;
+	xntmode_t tmode = XN_RELATIVE;
+	struct timespec64 ts = { 0 };
+	int ret = 0, datagrams = 0;
+	xnticks_t timeout = 0;
+	struct mmsghdr mmsg;
+	struct rtdm_fd *fd;
+	void __user *u_p;
+	ssize_t len;
+	spl_t s;
+
+	fd = rtdm_fd_get(ufd, 0);
+	if (IS_ERR(fd)) {
+		ret = PTR_ERR(fd);
+		goto out;
+	}
+
+	set_compat_bit(fd);
+
+	trace_cobalt_fd_recvmmsg(current, fd, ufd, flags);
+
+	if (u_timeout) {
+		ret = get_timespec(&ts, u_timeout);
+		if (ret)
+			goto fail;
+
+		if (!timespec64_valid(&ts)) {
+			ret = -EINVAL;
+			goto fail;
+		}
+
+		tmode = XN_ABSOLUTE;
+		timeout = ts2ns(&ts);
+		if (timeout == 0)
+			flags |= MSG_DONTWAIT;
+		else {
+			timeout += xnclock_read_monotonic(&nkclock);
+			rq.waiter = xnthread_current();
+			xntimer_init(&rq.timer, &nkclock,
+				     recvmmsg_timeout_handler,
+				     NULL, XNTIMER_IGRAVITY);
+			xnlock_get_irqsave(&nklock, s);
+			ret = xntimer_start(&rq.timer, timeout,
+					    XN_INFINITE, tmode);
+			xnlock_put_irqrestore(&nklock, s);
+		}
+	}
+
+	if (fd->oflags & O_NONBLOCK)
+		flags |= MSG_DONTWAIT;
+
+	for (u_p = u_msgvec; vlen > 0; vlen--) {
+		ret = get_mmsg(&mmsg, u_p);
+		if (ret)
+			break;
+		len = fd->ops->recvmsg_rt(fd, &mmsg.msg_hdr, flags);
+		if (len < 0) {
+			ret = len;
+			break;
+		}
+		mmsg.msg_len = (unsigned int)len;
+		ret = put_mmsg(&u_p, &mmsg);
+		if (ret)
+			break;
+		datagrams++;
+		/* OOB data requires immediate handling. */
+		if (mmsg.msg_hdr.msg_flags & MSG_OOB)
+			break;
+		if (flags & MSG_WAITFORONE)
+			flags |= MSG_DONTWAIT;
+	}
+
+	if (timeout) {
+		xnlock_get_irqsave(&nklock, s);
+		xntimer_destroy(&rq.timer);
+		xnlock_put_irqrestore(&nklock, s);
+	}
+
+fail:
+	rtdm_fd_put(fd);
+
+	if (datagrams > 0)
+		ret = datagrams;
+
+out:
+	trace_cobalt_fd_recvmmsg_status(current, fd, ufd, ret);
+
+	return ret;
+}
+
+static inline int __rtdm_fetch_timeout64(struct timespec64 *ts,
+					 const void __user *u_ts)
+{
+	return u_ts == NULL ? -EFAULT : cobalt_get_timespec64(ts, u_ts);
+}
+
+int __rtdm_fd_recvmmsg64(int ufd, void __user *u_msgvec, unsigned int vlen,
+			 unsigned int flags, void __user *u_timeout,
+			 int (*get_mmsg)(struct mmsghdr *mmsg,
+					 void __user *u_mmsg),
+			 int (*put_mmsg)(void __user **u_mmsg_p,
+					 const struct mmsghdr *mmsg))
+{
+	return __rtdm_fd_recvmmsg(ufd, u_msgvec, vlen, flags, u_timeout,
+				  get_mmsg, put_mmsg, __rtdm_fetch_timeout64);
+}
+
+
+ssize_t rtdm_fd_sendmsg(int ufd, const struct user_msghdr *msg, int flags)
+{
+	struct rtdm_fd *fd;
+	ssize_t ret;
+
+	fd = get_fd_fixup_mode(ufd);
+	if (IS_ERR(fd)) {
+		ret = PTR_ERR(fd);
+		goto out;
+	}
+
+	set_compat_bit(fd);
+
+	trace_cobalt_fd_sendmsg(current, fd, ufd, flags);
+
+	if (fd->oflags & O_NONBLOCK)
+		flags |= MSG_DONTWAIT;
+
+	if (is_secondary_domain())
+		ret = fd->ops->sendmsg_nrt(fd, msg, flags);
+	else
+		ret = fd->ops->sendmsg_rt(fd, msg, flags);
+
+	if (!XENO_ASSERT(COBALT, !spltest()))
+		splnone();
+
+	rtdm_fd_put(fd);
+out:
+	if (ret < 0)
+		trace_cobalt_fd_sendmsg_status(current, fd, ufd, ret);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(rtdm_fd_sendmsg);
+
+int __rtdm_fd_sendmmsg(int ufd, void __user *u_msgvec, unsigned int vlen,
+		       unsigned int flags,
+		       int (*get_mmsg)(struct mmsghdr *mmsg, void __user *u_mmsg),
+		       int (*put_mmsg)(void __user **u_mmsg_p, const struct mmsghdr *mmsg))
+{
+	int ret = 0, datagrams = 0;
+	struct mmsghdr mmsg;
+	struct rtdm_fd *fd;
+	void __user *u_p;
+	ssize_t len;
+
+	fd = rtdm_fd_get(ufd, 0);
+	if (IS_ERR(fd)) {
+		ret = PTR_ERR(fd);
+		goto out;
+	}
+
+	set_compat_bit(fd);
+
+	trace_cobalt_fd_sendmmsg(current, fd, ufd, flags);
+
+	if (fd->oflags & O_NONBLOCK)
+		flags |= MSG_DONTWAIT;
+
+	for (u_p = u_msgvec; vlen > 0; vlen--) {
+		ret = get_mmsg(&mmsg, u_p);
+		if (ret)
+			break;
+		len = fd->ops->sendmsg_rt(fd, &mmsg.msg_hdr, flags);
+		if (len < 0) {
+			ret = len;
+			break;
+		}
+		mmsg.msg_len = (unsigned int)len;
+		ret = put_mmsg(&u_p, &mmsg);
+		if (ret)
+			break;
+		datagrams++;
+	}
+
+	rtdm_fd_put(fd);
+
+	if (datagrams > 0)
+		ret = datagrams;
+
+out:
+	trace_cobalt_fd_sendmmsg_status(current, fd, ufd, ret);
+
+	return ret;
+}
+
+static void
+__fd_close(struct cobalt_ppd *p, struct rtdm_fd_index *idx, spl_t s)
+{
+	xnid_remove(&p->fds, &idx->id);
+	__put_fd(idx->fd, s);
+
+	kfree(idx);
+}
+
+int rtdm_fd_close(int ufd, unsigned int magic)
+{
+	struct rtdm_fd_index *idx;
+	struct cobalt_ppd *ppd;
+	struct rtdm_fd *fd;
+	spl_t s;
+
+	secondary_mode_only();
+
+	ppd = cobalt_ppd_get(0);
+
+	xnlock_get_irqsave(&fdtree_lock, s);
+	idx = fetch_fd_index(ppd, ufd);
+	if (idx == NULL)
+		goto eadv;
+
+	fd = idx->fd;
+	if (magic != 0 && fd->magic != magic) {
+eadv:
+		xnlock_put_irqrestore(&fdtree_lock, s);
+		return -EADV;
+	}
+
+	set_compat_bit(fd);
+
+	trace_cobalt_fd_close(current, fd, ufd, fd->refs);
+
+	/*
+	 * In dual kernel mode, the linux-side fdtable and the RTDM
+	 * ->close() handler are managed asynchronously, i.e. the
+	 * execution of the handler may be deferred until after the
+	 * regular file descriptor has been removed from the fdtable,
+	 * if some refs on the rtdm_fd are still pending.
+	 */
+	__fd_close(ppd, idx, s);
+	close_fd(ufd);
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(rtdm_fd_close);
+
+int rtdm_fd_mmap(int ufd, struct _rtdm_mmap_request *rma,
+		 void **u_addrp)
+{
+	struct rtdm_fd *fd;
+	int ret;
+
+	secondary_mode_only();
+
+	fd = rtdm_fd_get(ufd, 0);
+	if (IS_ERR(fd)) {
+		ret = PTR_ERR(fd);
+		goto out;
+	}
+
+	set_compat_bit(fd);
+
+	trace_cobalt_fd_mmap(current, fd, ufd, rma);
+
+	if (rma->flags & (MAP_FIXED|MAP_ANONYMOUS)) {
+		ret = -EADV;
+		goto unlock;
+	}
+
+	ret = __rtdm_mmap_from_fdop(fd, rma->length, rma->offset,
+				    rma->prot, rma->flags, u_addrp);
+unlock:
+	rtdm_fd_put(fd);
+out:
+	if (ret)
+		trace_cobalt_fd_mmap_status(current, fd, ufd, ret);
+
+	return ret;
+}
+
+int rtdm_fd_valid_p(int ufd)
+{
+	struct rtdm_fd *fd;
+	spl_t s;
+
+	xnlock_get_irqsave(&fdtree_lock, s);
+	fd = fetch_fd(cobalt_ppd_get(0), ufd);
+	xnlock_put_irqrestore(&fdtree_lock, s);
+
+	return fd != NULL;
+}
+
+/**
+ * @brief Bind a selector to specified event types of a given file descriptor
+ * @internal
+ *
+ * This function is invoked by higher RTOS layers implementing select-like
+ * services. It shall not be called directly by RTDM drivers.
+ *
+ * @param[in] ufd User-side file descriptor to bind to
+ * @param[in,out] selector Selector object that shall be bound to the given
+ * event
+ * @param[in] type Event type the caller is interested in
+ *
+ * @return 0 on success, otherwise:
+ *
+ * - -EADV is returned if the file descriptor @a ufd cannot be resolved.
+ *
+ * - -EBADF is returned if the underlying device is being torn down at the time
+ *   of the call.
+ *
+ * - -EINVAL is returned if @a type is invalid.
+ *
+ * @coretags{task-unrestricted}
+ */
+int rtdm_fd_select(int ufd, struct xnselector *selector,
+		   unsigned int type)
+{
+	struct rtdm_fd *fd;
+	int ret;
+
+	fd = rtdm_fd_get(ufd, 0);
+	if (IS_ERR(fd))
+		return PTR_ERR(fd);
+
+	set_compat_bit(fd);
+
+	ret = fd->ops->select(fd, selector, type, ufd);
+
+	if (!XENO_ASSERT(COBALT, !spltest()))
+		splnone();
+
+	rtdm_fd_put(fd);
+
+	return ret;
+}
+
+static void destroy_fd(void *cookie, struct xnid *id)
+{
+	struct cobalt_ppd *p = cookie;
+	struct rtdm_fd_index *idx;
+	spl_t s;
+
+	idx = container_of(id, struct rtdm_fd_index, id);
+	xnlock_get_irqsave(&fdtree_lock, s);
+	__fd_close(p, idx, 0);
+}
+
+void rtdm_fd_cleanup(struct cobalt_ppd *p)
+{
+	/*
+	 * This is called on behalf of a (userland) task exit handler,
+	 * so we don't have to deal with the regular file descriptors,
+	 * we only have to empty our own index.
+	 */
+	xntree_cleanup(&p->fds, p, destroy_fd);
+}
+
+void rtdm_fd_init(void)
+{
+	sema_init(&rtdm_fd_cleanup_sem, 0);
+	kthread_run(fd_cleanup_thread, NULL, "rtdm_fd");
+}
+
+static inline void warn_user(struct file *file, const char *call)
+{
+	struct dentry *dentry = file->f_path.dentry;
+
+	printk(XENO_WARNING
+	       "%s[%d] called regular %s() on /dev/rtdm/%s\n",
+	       current->comm, task_pid_nr(current), call + 5, dentry->d_name.name);
+}
+
+static ssize_t dumb_read(struct file *file, char  __user *buf,
+			 size_t count, loff_t __user *ppos)
+{
+	warn_user(file, __func__);
+	return -EINVAL;
+}
+
+static ssize_t dumb_write(struct file *file,  const char __user *buf,
+			  size_t count, loff_t __user *ppos)
+{
+	warn_user(file, __func__);
+	return -EINVAL;
+}
+
+static unsigned int dumb_poll(struct file *file, poll_table *pt)
+{
+	warn_user(file, __func__);
+	return -EINVAL;
+}
+
+static long dumb_ioctl(struct file *file, unsigned int cmd,
+		       unsigned long arg)
+{
+	warn_user(file, __func__);
+	return -EINVAL;
+}
+
+const struct file_operations rtdm_dumb_fops = {
+	.read		= dumb_read,
+	.write		= dumb_write,
+	.poll		= dumb_poll,
+	.unlocked_ioctl	= dumb_ioctl,
+};
+++ linux-patched/kernel/xenomai/rtdm/internal.h	2022-03-21 12:58:29.114891643 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/kernel/xenomai/rtdm/core.c	1970-01-01 01:00:00.000000000 +0100
+/*
+ * Copyright (C) 2005-2007 Jan Kiszka <jan.kiszka@web.de>.
+ * Copyright (C) 2005 Joerg Langenberg <joerg.langenberg@gmx.net>.
+ *
+ * Xenomai is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * Xenomai is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Xenomai; if not, write to the Free Software Foundation,
+ * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+#ifndef _RTDM_INTERNAL_H
+#define _RTDM_INTERNAL_H
+
+#include <linux/types.h>
+#include <linux/list.h>
+#include <linux/sem.h>
+#include <linux/file.h>
+#include <linux/atomic.h>
+#include <cobalt/kernel/tree.h>
+#include <cobalt/kernel/lock.h>
+#include <rtdm/driver.h>
+
+static inline void __rtdm_get_device(struct rtdm_device *device)
+{
+	atomic_inc(&device->refcount);
+}
+
+void __rtdm_put_device(struct rtdm_device *device);
+
+struct rtdm_device *__rtdm_get_namedev(const char *path);
+
+struct rtdm_device *__rtdm_get_protodev(int protocol_family,
+					int socket_type);
+
+void __rtdm_dev_close(struct rtdm_fd *fd);
+
+int __rtdm_dev_ioctl_core(struct rtdm_fd *fd,
+			  unsigned int request, void __user *arg);
+
+int __rtdm_mmap_from_fdop(struct rtdm_fd *fd, size_t len, off_t offset,
+			  int prot, int flags, void **pptr);
+
+/* nklock held, irqs off. */
+static inline void rtdm_fd_get_light(struct rtdm_fd *fd)
+{
+	++fd->refs;
+}
+
+int rtdm_init(void);
+
+void rtdm_cleanup(void);
+
+extern const struct file_operations rtdm_dumb_fops;
+
+#endif /* _RTDM_INTERNAL_H */
+++ linux-patched/kernel/xenomai/rtdm/core.c	2022-03-21 12:58:29.111891672 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/kernel/xenomai/rtdm/drvlib.c	1970-01-01 01:00:00.000000000 +0100
+/*
+ * Copyright (C) 2005 Jan Kiszka <jan.kiszka@web.de>
+ * Copyright (C) 2005 Joerg Langenberg <joerg.langenberg@gmx.net>
+ * Copyright (C) 2014 Philippe Gerum <rpm@xenomai.org>
+ *
+ * Xenomai is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * Xenomai is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Xenomai; if not, write to the Free Software Foundation,
+ * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+#include <linux/workqueue.h>
+#include <linux/slab.h>
+#include <linux/file.h>
+#include <linux/sched.h>
+#include <linux/fs.h>
+#include <linux/fdtable.h>
+#include <linux/anon_inodes.h>
+#include <cobalt/kernel/ppd.h>
+#include <cobalt/kernel/heap.h>
+#include "rtdm/internal.h"
+#define CREATE_TRACE_POINTS
+#include <trace/events/cobalt-rtdm.h>
+#include "posix/process.h"
+
+/**
+ * @ingroup rtdm
+ * @defgroup rtdm_driver_interface Driver programming interface
+ * RTDM driver programming interface
+ * @{
+ */
+
+static void cleanup_instance(struct rtdm_device *dev,
+			     struct rtdm_dev_context *context)
+{
+	if (context)
+		kfree(context);
+
+	__rtdm_put_device(dev);
+}
+
+void __rtdm_dev_close(struct rtdm_fd *fd)
+{
+	struct rtdm_dev_context *context = rtdm_fd_to_context(fd);
+	struct rtdm_device *dev = context->device;
+	struct rtdm_driver *drv = dev->driver;
+
+	if (!fd->stale && drv->ops.close)
+		drv->ops.close(fd);
+
+	cleanup_instance(dev, context);
+}
+
+int __rtdm_anon_getfd(const char *name, int flags)
+{
+	return anon_inode_getfd(name, &rtdm_dumb_fops, NULL, flags);
+}
+
+void __rtdm_anon_putfd(int ufd)
+{
+	close_fd(ufd);
+}
+
+static int create_instance(int ufd, struct rtdm_device *dev,
+			   struct rtdm_dev_context **context_ptr)
+{
+	struct rtdm_driver *drv = dev->driver;
+	struct rtdm_dev_context *context;
+
+	/*
+	 * Reset to NULL so that cleanup_instance() can always be used
+	 * to revert partially successful allocations.
+	 */
+	*context_ptr = NULL;
+
+	if ((drv->device_flags & RTDM_EXCLUSIVE) != 0 &&
+	    atomic_read(&dev->refcount) > 1)
+		return -EBUSY;
+
+	context = kzalloc(sizeof(struct rtdm_dev_context) +
+			  drv->context_size, GFP_KERNEL);
+	if (unlikely(context == NULL))
+		return -ENOMEM;
+
+	context->device = dev;
+	*context_ptr = context;
+
+	return rtdm_fd_enter(&context->fd, ufd, RTDM_FD_MAGIC, &dev->ops);
+}
+
+#ifdef CONFIG_XENO_OPT_RTDM_COMPAT_DEVNODE
+
+static inline struct file *
+open_devnode(struct rtdm_device *dev, const char *path, int oflag)
+{
+	struct file *filp;
+	char *filename;
+
+	if (IS_ENABLED(CONFIG_XENO_OPT_DEBUG_LEGACY) &&
+	    strncmp(path, "/dev/rtdm/", 10))
+		printk(XENO_WARNING
+		       "%s[%d] opens obsolete device path: %s\n",
+		       current->comm, task_pid_nr(current), path);
+
+	filename = kasprintf(GFP_KERNEL, "/dev/rtdm/%s", dev->name);
+	if (filename == NULL)
+		return ERR_PTR(-ENOMEM);
+
+	filp = filp_open(filename, oflag, 0);
+	kfree(filename);
+
+	return filp;
+}
+
+#else /* !CONFIG_XENO_OPT_RTDM_COMPAT_DEVNODE */
+
+static inline struct file *
+open_devnode(struct rtdm_device *dev, const char *path, int oflag)
+{
+	return filp_open(path, oflag, 0);
+}
+
+#endif /* !CONFIG_XENO_OPT_RTDM_COMPAT_DEVNODE */
+
+int __rtdm_dev_open(const char *path, int oflag)
+{
+	struct rtdm_dev_context *context;
+	struct rtdm_device *dev;
+	struct file *filp;
+	int ufd, ret;
+
+	secondary_mode_only();
+
+	/*
+	 * CAUTION: we do want a lookup into the registry to happen
+	 * before any attempt is made to open the devnode, so that we
+	 * don't inadvertently open a regular (i.e. non-RTDM) device.
+	 * Reason is that opening, then closing a device - because we
+	 * don't manage it - may incur side-effects we don't want,
+	 * e.g. opening then closing one end of a pipe would cause the
+	 * other side to read the EOF condition.  This is basically
+	 * why we keep a RTDM registry for named devices, so that we
+	 * can figure out whether an open() request is going to be
+	 * valid, without having to open the devnode yet.
+	 */
+	dev = __rtdm_get_namedev(path);
+	if (dev == NULL)
+		return -EADV;
+
+	ufd = get_unused_fd_flags(oflag);
+	if (ufd < 0) {
+		ret = ufd;
+		goto fail_fd;
+	}
+
+	filp = open_devnode(dev, path, oflag);
+	if (IS_ERR(filp)) {
+		ret = PTR_ERR(filp);
+		goto fail_fopen;
+	}
+
+	ret = create_instance(ufd, dev, &context);
+	if (ret < 0)
+		goto fail_create;
+
+	context->fd.minor = dev->minor;
+	context->fd.oflags = oflag;
+
+	trace_cobalt_fd_open(current, &context->fd, ufd, oflag);
+
+	if (dev->ops.open) {
+		ret = dev->ops.open(&context->fd, oflag);
+		if (!XENO_ASSERT(COBALT, !spltest()))
+			splnone();
+		if (ret < 0)
+			goto fail_open;
+	}
+
+	ret = rtdm_device_new_fd(&context->fd, ufd, context->device);
+	if (ret < 0)
+		goto fail_open;
+
+	fd_install(ufd, filp);
+
+	return ufd;
+
+fail_open:
+	cleanup_instance(dev, context);
+fail_create:
+	filp_close(filp, current->files);
+fail_fopen:
+	put_unused_fd(ufd);
+fail_fd:
+	__rtdm_put_device(dev);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(__rtdm_dev_open);
+
+int __rtdm_dev_socket(int protocol_family, int socket_type,
+		      int protocol)
+{
+	struct rtdm_dev_context *context;
+	struct rtdm_device *dev;
+	int ufd, ret;
+
+	secondary_mode_only();
+
+	dev = __rtdm_get_protodev(protocol_family, socket_type);
+	if (dev == NULL)
+		return -EAFNOSUPPORT;
+
+	ufd = __rtdm_anon_getfd("[rtdm-socket]", O_RDWR);
+	if (ufd < 0) {
+		ret = ufd;
+		goto fail_getfd;
+	}
+
+	ret = create_instance(ufd, dev, &context);
+	if (ret < 0)
+		goto fail_create;
+
+	trace_cobalt_fd_socket(current, &context->fd, ufd, protocol_family);
+
+	if (dev->ops.socket) {
+		ret = dev->ops.socket(&context->fd, protocol);
+		if (!XENO_ASSERT(COBALT, !spltest()))
+			splnone();
+		if (ret < 0)
+			goto fail_socket;
+	}
+
+	ret = rtdm_device_new_fd(&context->fd, ufd, context->device);
+	if (ret < 0)
+		goto fail_socket;
+
+	return ufd;
+
+fail_socket:
+	cleanup_instance(dev, context);
+fail_create:
+	close_fd(ufd);
+fail_getfd:
+	__rtdm_put_device(dev);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(__rtdm_dev_socket);
+
+int __rtdm_dev_ioctl_core(struct rtdm_fd *fd, unsigned int request,
+			  void __user *arg)
+{
+	struct rtdm_device *dev = rtdm_fd_device(fd);
+	struct rtdm_driver *drv;
+	struct rtdm_device_info dev_info;
+
+	if (fd->magic != RTDM_FD_MAGIC || request != RTIOC_DEVICE_INFO)
+		return -EADV;
+
+	drv = dev->driver;
+	dev_info.device_flags = drv->device_flags;
+	dev_info.device_class = drv->profile_info.class_id;
+	dev_info.device_sub_class = drv->profile_info.subclass_id;
+	dev_info.profile_version = drv->profile_info.version;
+
+	return rtdm_safe_copy_to_user(fd, arg, &dev_info, sizeof(dev_info));
+}
+
+#ifdef DOXYGEN_CPP /* Only used for doxygen doc generation */
+
+/**
+ * @addtogroup rtdm_sync
+ *@{
+ */
+
+/**
+ * @fn void rtdm_waitqueue_init(struct rtdm_waitqueue *wq)
+ * @brief  Initialize a RTDM wait queue
+ *
+ * Sets up a wait queue structure for further use.
+ *
+ * @param wq waitqueue to initialize.
+ *
+ * @coretags{task-unrestricted}
+ */
+void rtdm_waitqueue_init(struct rtdm_waitqueue *wq);
+
+/**
+ * @fn void rtdm_waitqueue_destroy(struct rtdm_waitqueue *wq)
+ * @brief  Deletes a RTDM wait queue
+ *
+ * Dismantles a wait queue structure, releasing all resources attached
+ * to it.
+ *
+ * @param wq waitqueue to delete.
+ *
+ * @coretags{task-unrestricted}
+ */
+void rtdm_waitqueue_destroy(struct rtdm_waitqueue *wq);
+
+/**
+ * @fn rtdm_timedwait_condition_locked(struct rtdm_wait_queue *wq, C_expr condition, nanosecs_rel_t timeout, rtdm_toseq_t *toseq)
+ * @brief Timed sleep on a locked waitqueue until a condition gets true
+ *
+ * The calling task is put to sleep until @a condition evaluates to
+ * true or a timeout occurs. The condition is checked each time the
+ * waitqueue @a wq is signaled.
+ *
+ * The waitqueue must have been locked by a call to
+ * rtdm_waitqueue_lock() prior to calling this service.
+ *
+ * @param wq locked waitqueue to wait on. The waitqueue lock is
+ * dropped when sleeping, then reacquired before this service returns
+ * to the caller.
+ *
+ * @param condition C expression for the event to wait for.
+ *
+ * @param timeout relative timeout in nanoseconds, see
+ * @ref RTDM_TIMEOUT_xxx for special values.
+ * 
+ * @param[in,out] toseq handle of a timeout sequence as returned by
+ * rtdm_toseq_init() or NULL.
+ *
+ * @return 0 on success, otherwise:
+ *
+ * - -EINTR is returned if calling task has received a Linux signal or
+ * has been forcibly unblocked by a call to rtdm_task_unblock().
+ *
+ * - -ETIMEDOUT is returned if the request has not been satisfied
+ * within the specified amount of time.
+ *
+ * @note rtdm_waitqueue_signal() has to be called after changing any
+ * variable that could change the result of the wait condition.
+ *
+ * @note Passing RTDM_TIMEOUT_NONE to @a timeout makes no sense for
+ * such service, and might cause unexpected behavior.
+ *
+ * @coretags{primary-only, might-switch}
+ */
+rtdm_timedwait_condition_locked(struct rtdm_wait_queue *wq, C_expr condition,
+				nanosecs_rel_t timeout, rtdm_toseq_t *toseq);
+
+/**
+ * @fn rtdm_wait_condition_locked(struct rtdm_wait_queue *wq, C_expr condition)
+ * @brief Sleep on a locked waitqueue until a condition gets true
+ *
+ * The calling task is put to sleep until @a condition evaluates to
+ * true. The condition is checked each time the waitqueue @a wq is
+ * signaled.
+ *
+ * The waitqueue must have been locked by a call to
+ * rtdm_waitqueue_lock() prior to calling this service.
+ *
+ * @param wq locked waitqueue to wait on. The waitqueue lock is
+ * dropped when sleeping, then reacquired before this service returns
+ * to the caller.
+ *
+ * @param condition C expression for the event to wait for.
+ *
+ * @return 0 on success, otherwise:
+ *
+ * - -EINTR is returned if calling task has received a Linux signal or
+ * has been forcibly unblocked by a call to rtdm_task_unblock().
+ *
+ * @note rtdm_waitqueue_signal() has to be called after changing any
+ * variable that could change the result of the wait condition.
+ *
+ * @coretags{primary-only, might-switch}
+ */
+rtdm_wait_condition_locked(struct rtdm_wait_queue *wq, C_expr condition);
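+
+/*
+ * A minimal consumer/producer sketch combining the locked wait
+ * services with rtdm_waitqueue_signal(), assuming a hypothetical
+ * driver state @a foo with a @c pending flag guarded by the waitqueue
+ * lock:
+ *
+ *	rtdm_lockctx_t ctx;
+ *	int ret;
+ *
+ *	// consumer side
+ *	rtdm_waitqueue_lock(&foo->wq, ctx);
+ *	ret = rtdm_wait_condition_locked(&foo->wq, foo->pending);
+ *	if (ret == 0)
+ *		foo->pending = false;
+ *	rtdm_waitqueue_unlock(&foo->wq, ctx);
+ *
+ *	// producer side
+ *	rtdm_waitqueue_lock(&foo->wq, ctx);
+ *	foo->pending = true;
+ *	rtdm_waitqueue_signal(&foo->wq);
+ *	rtdm_waitqueue_unlock(&foo->wq, ctx);
+ */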
+
+/**
+ * @fn rtdm_timedwait_condition(struct rtdm_wait_queue *wq, C_expr condition, nanosecs_rel_t timeout, rtdm_toseq_t *toseq)
+ * @brief Timed sleep on a waitqueue until a condition gets true
+ *
+ * The calling task is put to sleep until @a condition evaluates to
+ * true or a timeout occurs. The condition is checked each time the
+ * waitqueue @a wq is signaled.
+ *
+ * @param wq waitqueue to wait on.
+ *
+ * @param condition C expression for the event to wait for.
+ *
+ * @param timeout relative timeout in nanoseconds, see
+ * @ref RTDM_TIMEOUT_xxx for special values.
+ * 
+ * @param[in,out] toseq handle of a timeout sequence as returned by
+ * rtdm_toseq_init() or NULL.
+ *
+ * @return 0 on success, otherwise:
+ *
+ * - -EINTR is returned if calling task has received a Linux signal or
+ * has been forcibly unblocked by a call to rtdm_task_unblock().
+ *
+ * - -ETIMEDOUT is returned if the request has not been satisfied
+ * within the specified amount of time.
+ *
+ * @note rtdm_waitqueue_signal() has to be called after changing any
+ * variable that could change the result of the wait condition.
+ *
+ * @note Passing RTDM_TIMEOUT_NONE to @a timeout makes no sense for
+ * such service, and might cause unexpected behavior.
+ *
+ * @coretags{primary-only, might-switch}
+ */
+rtdm_timedwait_condition(struct rtdm_wait_queue *wq, C_expr condition,
+			 nanosecs_rel_t timeout, rtdm_toseq_t *toseq);
+
+/**
+ * @fn void rtdm_timedwait(struct rtdm_wait_queue *wq, nanosecs_rel_t timeout, rtdm_toseq_t *toseq)
+ * @brief Timed sleep on a waitqueue unconditionally
+ *
+ * The calling task is put to sleep until the waitqueue is signaled by
+ * either rtdm_waitqueue_signal() or rtdm_waitqueue_broadcast(), or
+ * flushed by a call to rtdm_waitqueue_flush(), or a timeout occurs.
+ *
+ * @param wq waitqueue to wait on.
+ *
+ * @param timeout relative timeout in nanoseconds, see
+ * @ref RTDM_TIMEOUT_xxx for special values.
+ * 
+ * @param[in,out] toseq handle of a timeout sequence as returned by
+ * rtdm_toseq_init() or NULL.
+ *
+ * @return 0 on success, otherwise:
+ *
+ * - -EINTR is returned if the waitqueue has been flushed, or the
+ * calling task has received a Linux signal or has been forcibly
+ * unblocked by a call to rtdm_task_unblock().
+ *
+ * - -ETIMEDOUT is returned if the request has not been satisfied
+ * within the specified amount of time.
+ *
+ * @note Passing RTDM_TIMEOUT_NONE to @a timeout makes no sense for
+ * such service, and might cause unexpected behavior.
+ *
+ * @coretags{primary-only, might-switch}
+ */
+void rtdm_timedwait(struct rtdm_wait_queue *wq,
+		    nanosecs_rel_t timeout, rtdm_toseq_t *toseq);
+
+/**
+ * @fn void rtdm_timedwait_locked(struct rtdm_wait_queue *wq, nanosecs_rel_t timeout, rtdm_toseq_t *toseq)
+ * @brief Timed sleep on a locked waitqueue unconditionally
+ *
+ * The calling task is put to sleep until the waitqueue is signaled by
+ * either rtdm_waitqueue_signal() or rtdm_waitqueue_broadcast(), or
+ * flushed by a call to rtdm_waitqueue_flush(), or a timeout occurs.
+ *
+ * The waitqueue must have been locked by a call to
+ * rtdm_waitqueue_lock() prior to calling this service.
+ *
+ * @param wq locked waitqueue to wait on. The waitqueue lock is
+ * dropped when sleeping, then reacquired before this service returns
+ * to the caller.
+ *
+ * @param timeout relative timeout in nanoseconds, see
+ * @ref RTDM_TIMEOUT_xxx for special values.
+ * 
+ * @param[in,out] toseq handle of a timeout sequence as returned by
+ * rtdm_toseq_init() or NULL.
+ *
+ * @return 0 on success, otherwise:
+ *
+ * - -EINTR is returned if the waitqueue has been flushed, or the
+ * calling task has received a Linux signal or has been forcibly
+ * unblocked by a call to rtdm_task_unblock().
+ *
+ * - -ETIMEDOUT is returned if the request has not been satisfied
+ * within the specified amount of time.
+ *
+ * @note Passing RTDM_TIMEOUT_NONE to @a timeout makes no sense for
+ * such service, and might cause unexpected behavior.
+ *
+ * @coretags{primary-only, might-switch}
+ */
+void rtdm_timedwait_locked(struct rtdm_wait_queue *wq,
+			   nanosecs_rel_t timeout, rtdm_toseq_t *toseq);
+
+/**
+ * @fn rtdm_wait_condition(struct rtdm_wait_queue *wq, C_expr condition)
+ * @brief Sleep on a waitqueue until a condition gets true
+ *
+ * The calling task is put to sleep until @a condition evaluates to
+ * true. The condition is checked each time the waitqueue @a wq is
+ * signaled.
+ *
+ * @param wq waitqueue to wait on
+ *
+ * @param condition C expression for the event to wait for.
+ *
+ * @return 0 on success, otherwise:
+ *
+ * - -EINTR is returned if calling task has received a Linux signal or
+ * has been forcibly unblocked by a call to rtdm_task_unblock().
+ *
+ * @note rtdm_waitqueue_signal() has to be called after changing any
+ * variable that could change the result of the wait condition.
+ *
+ * @coretags{primary-only, might-switch}
+ */
+rtdm_wait_condition(struct rtdm_wait_queue *wq, C_expr condition);
+
+/**
+ * @fn void rtdm_wait(struct rtdm_wait_queue *wq)
+ * @brief Sleep on a waitqueue unconditionally
+ *
+ * The calling task is put to sleep until the waitqueue is signaled by
+ * either rtdm_waitqueue_signal() or rtdm_waitqueue_broadcast(), or
+ * flushed by a call to rtdm_waitqueue_flush().
+ *
+ * @param wq waitqueue to wait on.
+ *
+ * @return 0 on success, otherwise:
+ *
+ * - -EINTR is returned if the waitqueue has been flushed, or the
+ * calling task has received a Linux signal or has been forcibly
+ * unblocked by a call to rtdm_task_unblock().
+ *
+ * @coretags{primary-only, might-switch}
+ */
+void rtdm_wait(struct rtdm_wait_queue *wq);
+
+/**
+ * @fn void rtdm_wait_locked(struct rtdm_wait_queue *wq)
+ * @brief Sleep on a locked waitqueue unconditionally
+ *
+ * The calling task is put to sleep until the waitqueue is signaled by
+ * either rtdm_waitqueue_signal() or rtdm_waitqueue_broadcast(), or
+ * flushed by a call to rtdm_waitqueue_flush().
+ *
+ * The waitqueue must have been locked by a call to
+ * rtdm_waitqueue_lock() prior to calling this service.
+ *
+ * @param wq locked waitqueue to wait on. The waitqueue lock is
+ * dropped when sleeping, then reacquired before this service returns
+ * to the caller.
+ *
+ * @return 0 on success, otherwise:
+ *
+ * - -EINTR is returned if the waitqueue has been flushed, or the
+ * calling task has received a Linux signal or has been forcibly
+ * unblocked by a call to rtdm_task_unblock().
+ *
+ * @coretags{primary-only, might-switch}
+ */
+void rtdm_wait_locked(struct rtdm_wait_queue *wq);
+
+/**
+ * @fn void rtdm_waitqueue_lock(struct rtdm_wait_queue *wq, rtdm_lockctx_t context)
+ * @brief Lock a waitqueue
+ *
+ * Acquires the lock on the waitqueue @a wq.
+ *
+ * @param wq waitqueue to lock.
+ *
+ * @param context name of local variable to store the context in.
+ *
+ * @note Recursive locking might lead to unexpected behavior,
+ * including lock up.
+ *
+ * @coretags{unrestricted}
+ */
+void rtdm_waitqueue_lock(struct rtdm_wait_queue *wq, rtdm_lockctx_t context);
+
+/**
+ * @fn void rtdm_waitqueue_unlock(struct rtdm_wait_queue *wq, rtdm_lockctx_t context)
+ * @brief Unlock a waitqueue
+ *
+ * Releases the lock on the waitqueue @a wq.
+ *
+ * @param wq waitqueue to unlock.
+ *
+ * @param context name of local variable to retrieve the context from.
+ *
+ * @coretags{unrestricted}
+ */
+void rtdm_waitqueue_unlock(struct rtdm_wait_queue *wq, rtdm_lockctx_t context);
+
+/**
+ * @fn void rtdm_waitqueue_signal(struct rtdm_wait_queue *wq)
+ * @brief Signal a waitqueue
+ *
+ * Signals the waitqueue @a wq, waking up a single waiter (if
+ * any).
+ *
+ * @param wq waitqueue to signal.
+ *
+ * @return non-zero if a task has been readied as a result of this
+ * call, zero otherwise.
+ *
+ * @coretags{unrestricted, might-switch}
+ */
+void rtdm_waitqueue_signal(struct rtdm_wait_queue *wq);
+
+/**
+ * @fn void rtdm_waitqueue_broadcast(struct rtdm_wait_queue *wq)
+ * @brief Broadcast a waitqueue
+ *
+ * Broadcast the waitqueue @a wq, waking up all waiters. Each
+ * readied task may assume to have received the wake up event.
+ *
+ * @param wq waitqueue to broadcast.
+ *
+ * @return non-zero if at least one task has been readied as a result
+ * of this call, zero otherwise.
+ *
+ * @coretags{unrestricted, might-switch}
+ */
+void rtdm_waitqueue_broadcast(struct rtdm_wait_queue *wq);
+
+/**
+ * @fn void rtdm_waitqueue_flush(struct rtdm_wait_queue *wq)
+ * @brief Flush a waitqueue
+ *
+ * Flushes the waitqueue @a wq, unblocking all waiters with an error
+ * status (-EINTR).
+ *
+ * @param wq waitqueue to flush.
+ *
+ * @return non-zero if at least one task has been readied as a result
+ * of this call, zero otherwise.
+ *
+ * @coretags{unrestricted, might-switch}
+ */
+void rtdm_waitqueue_flush(struct rtdm_wait_queue *wq);
+
+/**
+ * @fn void rtdm_waitqueue_wakeup(struct rtdm_wait_queue *wq, rtdm_task_t waiter)
+ * @brief Signal a particular waiter on a waitqueue
+ *
+ * Signals the waitqueue @a wq, waking up waiter @a waiter only,
+ * which must be currently sleeping on the waitqueue.
+ *
+ * @param wq waitqueue to signal.
+ *
+ * @param waiter RTDM task to wake up.
+ *
+ * @coretags{unrestricted, might-switch}
+ */
+void rtdm_waitqueue_wakeup(struct rtdm_wait_queue *wq, rtdm_task_t waiter);
+
+/**
+ * @fn rtdm_for_each_waiter(rtdm_task_t pos, struct rtdm_wait_queue *wq)
+ * @brief Simple iterator for waitqueues
+ *
+ * This construct traverses the wait list of a given waitqueue
+ * @a wq, assigning each RTDM task pointer to the cursor variable
+ * @a pos, which must be of type rtdm_task_t.
+ *
+ * @a wq must have been locked by a call to rtdm_waitqueue_lock()
+ * prior to traversing its wait list.
+ *
+ * @param pos cursor variable holding a pointer to the RTDM task
+ * being fetched.
+ *
+ * @param wq waitqueue to scan.
+ *
+ * @note The waitqueue should not be signaled, broadcast or flushed
+ * during the traversal, unless the loop is aborted immediately
+ * after. Should multiple waiters be readied while iterating, the safe
+ * form rtdm_for_each_waiter_safe() must be used for traversal
+ * instead.
+ *
+ * @coretags{unrestricted}
+ */
+rtdm_for_each_waiter(rtdm_task_t pos, struct rtdm_wait_queue *wq);
+
+/**
+ * @fn rtdm_for_each_waiter_safe(rtdm_task_t pos, rtdm_task_t tmp, struct rtdm_wait_queue *wq)
+ * @brief Safe iterator for waitqueues
+ *
+ * This construct traverses the wait list of a given waitqueue
+ * @a wq, assigning each RTDM task pointer to the cursor variable
+ * @a pos, which must be of type rtdm_task_t.
+ *
+ * Unlike with rtdm_for_each_waiter(), the waitqueue may be signaled,
+ * broadcast or flushed during the traversal.
+ *
+ * @a wq must have been locked by a call to rtdm_waitqueue_lock()
+ * prior to traversing its wait list.
+ *
+ * @param pos cursor variable holding a pointer to the RTDM task
+ * being fetched.
+ *
+ * @param tmp temporary cursor variable.
+ *
+ * @param wq waitqueue to scan.
+ *
+ * @coretags{unrestricted}
+ */
+rtdm_for_each_waiter_safe(rtdm_task_t pos, rtdm_task_t tmp, struct rtdm_wait_queue *wq);
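+
+/*
+ * Iteration sketch, assuming a hypothetical per-device waitqueue and
+ * the cursor variables @a pos and @a tmp declared as described above:
+ * since every waiter is readied from within the loop, the safe variant
+ * is required here.
+ *
+ *	rtdm_waitqueue_lock(&foo->wq, ctx);
+ *	rtdm_for_each_waiter_safe(pos, tmp, &foo->wq)
+ *		rtdm_waitqueue_wakeup(&foo->wq, pos);
+ *	rtdm_waitqueue_unlock(&foo->wq, ctx);
+ */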
+
+/** @} rtdm_sync */
+
+/**
+ * @defgroup rtdm_interdriver_api Driver to driver services
+ * Inter-driver interface
+ *@{
+ */
+
+/**
+ * @brief Open a device
+ *
+ * Refer to rtdm_open() for parameters and return values
+ *
+ * @coretags{secondary-only, might-switch}
+ */
+int rtdm_open(const char *path, int oflag, ...);
+
+/**
+ * @brief Create a socket
+ *
+ * Refer to rtdm_socket() for parameters and return values. Action
+ * depends on driver implementation, see @ref rtdm_profiles
+ * "Device Profiles".
+ *
+ * @coretags{secondary-only, might-switch}
+ */
+int rtdm_socket(int protocol_family, int socket_type, int protocol);
+
+/**
+ * @brief Close a device or socket
+ *
+ * Refer to rtdm_close() for parameters and return values. Action
+ * depends on driver implementation, see @ref rtdm_profiles
+ * "Device Profiles".
+ *
+ * @coretags{secondary-only, might-switch}
+ */
+int rtdm_close(int fd);
+
+/**
+ * @brief Issue an IOCTL
+ *
+ * Refer to rtdm_ioctl() for parameters and return values. Action
+ * depends on driver implementation, see @ref rtdm_profiles
+ * "Device Profiles".
+ *
+ * @coretags{task-unrestricted, might-switch}
+ */
+int rtdm_ioctl(int fd, int request, ...);
+
+/**
+ * @brief Read from device
+ *
+ * Refer to rtdm_read() for parameters and return values. Action
+ * depends on driver implementation, see @ref rtdm_profiles
+ * "Device Profiles".
+ *
+ * @coretags{mode-unrestricted, might-switch}
+ */
+ssize_t rtdm_read(int fd, void *buf, size_t nbyte);
+
+/**
+ * @brief Write to device
+ *
+ * Refer to rtdm_write() for parameters and return values. Action
+ * depends on driver implementation, see @ref rtdm_profiles
+ * "Device Profiles".
+ *
+ * @coretags{mode-unrestricted, might-switch}
+ */
+ssize_t rtdm_write(int fd, const void *buf, size_t nbyte);
+
+/**
+ * @brief Receive message from socket
+ *
+ * Refer to rtdm_recvmsg() for parameters and return values. Action
+ * depends on driver implementation, see @ref rtdm_profiles
+ * "Device Profiles".
+ *
+ * @coretags{mode-unrestricted, might-switch}
+ */
+ssize_t rtdm_recvmsg(int fd, struct user_msghdr *msg, int flags);
+
+/**
+ * @brief Receive message from socket
+ *
+ * Refer to rtdm_recvfrom() for parameters and return values. Action
+ * depends on driver implementation, see @ref rtdm_profiles
+ * "Device Profiles".
+ *
+ * @coretags{mode-unrestricted, might-switch}
+ */
+ssize_t rtdm_recvfrom(int fd, void *buf, size_t len, int flags,
+		      struct sockaddr *from, socklen_t *fromlen);
+
+/**
+ * @brief Receive message from socket
+ *
+ * Refer to rtdm_recv() for parameters and return values. Action
+ * depends on driver implementation, see @ref rtdm_profiles
+ * "Device Profiles".
+ *
+ * @coretags{mode-unrestricted, might-switch}
+ */
+ssize_t rtdm_recv(int fd, void *buf, size_t len, int flags);
+
+/**
+ * @brief Transmit message to socket
+ *
+ * Refer to rtdm_sendmsg() for parameters and return values. Action
+ * depends on driver implementation, see @ref rtdm_profiles
+ * "Device Profiles".
+ *
+ * @coretags{mode-unrestricted, might-switch}
+ */
+ssize_t rtdm_sendmsg(int fd, const struct user_msghdr *msg, int flags);
+
+/**
+ * @brief Transmit message to socket
+ *
+ * Refer to rtdm_sendto() for parameters and return values. Action
+ * depends on driver implementation, see @ref rtdm_profiles
+ * "Device Profiles".
+ *
+ * @coretags{mode-unrestricted, might-switch}
+ */
+ssize_t rtdm_sendto(int fd, const void *buf, size_t len, int flags,
+		    const struct sockaddr *to, socklen_t tolen);
+
+/**
+ * @brief Transmit message to socket
+ *
+ * Refer to rtdm_send() for parameters and return values. Action
+ * depends on driver implementation, see @ref rtdm_profiles
+ * "Device Profiles".
+ *
+ * @coretags{mode-unrestricted, might-switch}
+ */
+ssize_t rtdm_send(int fd, const void *buf, size_t len, int flags);
+
+/**
+ * @brief Bind to local address
+ *
+ * Refer to rtdm_bind() for parameters and return values. Action
+ * depends on driver implementation, see @ref rtdm_profiles
+ * "Device Profiles".
+ *
+ * @coretags{task-unrestricted, might-switch}
+ */
+int rtdm_bind(int fd, const struct sockaddr *my_addr, socklen_t addrlen);
+
+/**
+ * @brief Connect to remote address
+ *
+ * Refer to rtdm_connect() for parameters and return values. Action
+ * depends on driver implementation, see @ref rtdm_profiles
+ * "Device Profiles".
+ *
+ * @coretags{mode-unrestricted, might-switch}
+ */
+int rtdm_connect(int fd, const struct sockaddr *serv_addr, socklen_t addrlen);
+
+/**
+ * @brief Listen to incoming connection requests
+ *
+ * Refer to rtdm_listen() for parameters and return values. Action
+ * depends on driver implementation, see @ref rtdm_profiles
+ * "Device Profiles".
+ *
+ * @coretags{task-unrestricted, might-switch}
+ */
+int rtdm_listen(int fd, int backlog);
+
+/**
+ * @brief Accept a connection request
+ *
+ * Refer to rtdm_accept() for parameters and return values. Action
+ * depends on driver implementation, see @ref rtdm_profiles
+ * "Device Profiles".
+ *
+ * @coretags{mode-unrestricted, might-switch}
+ */
+int rtdm_accept(int fd, struct sockaddr *addr, socklen_t *addrlen);
+
+/**
+ * @brief Shut down parts of a connection
+ *
+ * Refer to rtdm_shutdown() for parameters and return values. Action
+ * depends on driver implementation, see @ref rtdm_profiles
+ * "Device Profiles".
+ *
+ * @coretags{secondary-only, might-switch}
+ */
+int rtdm_shutdown(int fd, int how);
+
+/**
+ * @brief Get socket option
+ *
+ * Refer to rtdm_getsockopt() for parameters and return values. Action
+ * depends on driver implementation, see @ref rtdm_profiles
+ * "Device Profiles".
+ *
+ * @coretags{task-unrestricted, might-switch}
+ */
+int rtdm_getsockopt(int fd, int level, int optname, void *optval,
+		    socklen_t *optlen);
+
+/**
+ * @brief Set socket option
+ *
+ * Refer to rtdm_setsockopt() for parameters and return values. Action
+ * depends on driver implementation, see @ref rtdm_profiles
+ * "Device Profiles".
+ *
+ * @coretags{task-unrestricted, might-switch}
+ */
+int rtdm_setsockopt(int fd, int level, int optname, const void *optval,
+		    socklen_t optlen);
+
+/**
+ * @brief Get local socket address
+ *
+ * Refer to rtdm_getsockname() for parameters and return values. Action
+ * depends on driver implementation, see @ref rtdm_profiles
+ * "Device Profiles".
+ *
+ * @coretags{task-unrestricted, might-switch}
+ */
+int rtdm_getsockname(int fd, struct sockaddr *name, socklen_t *namelen);
+
+/**
+ * @brief Get socket destination address
+ *
+ * Refer to rtdm_getpeername() for parameters and return values. Action
+ * depends on driver implementation, see @ref rtdm_profiles
+ * "Device Profiles".
+ *
+ * @coretags{task-unrestricted, might-switch}
+ */
+int rtdm_getpeername(int fd, struct sockaddr *name, socklen_t *namelen);
+
+/** @} Inter-driver calls */
+
+/** @} */
+
+/*!
+ * @addtogroup rtdm_user_api
+ * @{
+ */
+
+/**
+ * @brief Open a device
+ *
+ * @param[in] path Device name
+ * @param[in] oflag Open flags
+ * @param ... Further parameters will be ignored.
+ *
+ * @return Positive file descriptor value on success, otherwise a negative
+ * error code.
+ *
+ * Action depends on driver implementation, see @ref rtdm_profiles
+ * "Device Profiles".
+ *
+ * @see @c open() in IEEE Std 1003.1,
+ * http://www.opengroup.org/onlinepubs/009695399
+ *
+ * @coretags{secondary-only, might-switch}
+ */
+int rtdm_open(const char *path, int oflag, ...);
+
+/**
+ * @brief Create a socket
+ *
+ * @param[in] protocol_family Protocol family (@c PF_xxx)
+ * @param[in] socket_type Socket type (@c SOCK_xxx)
+ * @param[in] protocol Protocol ID, 0 for default
+ *
+ * @return Positive file descriptor value on success, otherwise a negative
+ * error code.
+ *
+ * Action depends on driver implementation, see @ref rtdm_profiles
+ * "Device Profiles".
+ *
+ * @see @c socket() in IEEE Std 1003.1,
+ * http://www.opengroup.org/onlinepubs/009695399
+ *
+ * @coretags{secondary-only, might-switch}
+ */
+int rtdm_socket(int protocol_family, int socket_type, int protocol);
+
+/**
+ * @brief Close a device or socket
+ *
+ * @param[in] fd File descriptor as returned by rtdm_open() or rtdm_socket()
+ *
+ * @return 0 on success, otherwise a negative error code.
+ *
+ * @note If the matching rtdm_open() or rtdm_socket() call took place in
+ * non-real-time context, rtdm_close() must be issued within non-real-time
+ * as well. Otherwise, the call will fail.
+ *
+ * Action depends on driver implementation, see @ref rtdm_profiles
+ * "Device Profiles".
+ *
+ * @see @c close() in IEEE Std 1003.1,
+ * http://www.opengroup.org/onlinepubs/009695399
+ *
+ * @coretags{secondary-only, might-switch}
+ */
+int rtdm_close(int fd);
+
+/**
+ * @brief Issue an IOCTL
+ *
+ * @param[in] fd File descriptor as returned by rtdm_open() or rtdm_socket()
+ * @param[in] request IOCTL code
+ * @param ... Optional third argument, depending on IOCTL function
+ * (@c void @c * or @c unsigned @c long)
+ *
+ * @return Positive value on success, otherwise negative error code
+ *
+ * Action depends on driver implementation, see @ref rtdm_profiles
+ * "Device Profiles".
+ *
+ * @see @c ioctl() in IEEE Std 1003.1,
+ * http://www.opengroup.org/onlinepubs/009695399
+ *
+ * @coretags{task-unrestricted, might-switch}
+ */
+int rtdm_ioctl(int fd, int request, ...);
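+
+/*
+ * End-to-end sketch over the calls documented in this group, assuming
+ * a hypothetical named device "/dev/rtdm/foo" exposing a FOO_SET_RATE
+ * ioctl:
+ *
+ *	unsigned int rate = 1000;
+ *	int fd, ret;
+ *
+ *	fd = rtdm_open("/dev/rtdm/foo", O_RDWR);
+ *	if (fd < 0)
+ *		return fd;
+ *	ret = rtdm_ioctl(fd, FOO_SET_RATE, &rate);
+ *	rtdm_close(fd);
+ */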
+
+/**
+ * @brief Read from device
+ *
+ * @param[in] fd File descriptor as returned by rtdm_open()
+ * @param[out] buf Input buffer
+ * @param[in] nbyte Number of bytes to read
+ *
+ * @return Number of bytes read, otherwise negative error code
+ *
+ * Action depends on driver implementation, see @ref rtdm_profiles
+ * "Device Profiles".
+ *
+ * @see @c read() in IEEE Std 1003.1,
+ * http://www.opengroup.org/onlinepubs/009695399
+ *
+ * @coretags{mode-unrestricted, might-switch}
+ */
+ssize_t rtdm_read(int fd, void *buf, size_t nbyte);
+
+/**
+ * @brief Write to device
+ *
+ * @param[in] fd File descriptor as returned by rtdm_open()
+ * @param[in] buf Output buffer
+ * @param[in] nbyte Number of bytes to write
+ *
+ * @return Number of bytes written, otherwise negative error code
+ *
+ * Action depends on driver implementation, see @ref rtdm_profiles
+ * "Device Profiles".
+ *
+ * @see @c write() in IEEE Std 1003.1,
+ * http://www.opengroup.org/onlinepubs/009695399
+ *
+ * @coretags{mode-unrestricted, might-switch}
+ */
+ssize_t rtdm_write(int fd, const void *buf, size_t nbyte);
+
+/**
+ * @brief Receive message from socket
+ *
+ * @param[in] fd File descriptor as returned by rtdm_socket()
+ * @param[in,out] msg Message descriptor
+ * @param[in] flags Message flags
+ *
+ * @return Number of bytes received, otherwise negative error code
+ *
+ * Action depends on driver implementation, see @ref rtdm_profiles
+ * "Device Profiles".
+ *
+ * @see @c recvmsg() in IEEE Std 1003.1,
+ * http://www.opengroup.org/onlinepubs/009695399
+ *
+ * @coretags{mode-unrestricted, might-switch}
+ */
+ssize_t rtdm_recvmsg(int fd, struct user_msghdr *msg, int flags);
+
+/**
+ * @brief Receive message from socket
+ *
+ * @param[in] fd File descriptor as returned by rtdm_socket()
+ * @param[out] buf Message buffer
+ * @param[in] len Message buffer size
+ * @param[in] flags Message flags
+ * @param[out] from Buffer for message sender address
+ * @param[in,out] fromlen Address buffer size
+ *
+ * @return Number of bytes received, otherwise negative error code
+ *
+ * Action depends on driver implementation, see @ref rtdm_profiles
+ * "Device Profiles".
+ *
+ * @see @c recvfrom() in IEEE Std 1003.1,
+ * http://www.opengroup.org/onlinepubs/009695399
+ *
+ * @coretags{mode-unrestricted, might-switch}
+ */
+ssize_t rtdm_recvfrom(int fd, void *buf, size_t len, int flags,
+		      struct sockaddr *from, socklen_t *fromlen);
+
+/**
+ * @brief Receive message from socket
+ *
+ * @param[in] fd File descriptor as returned by rtdm_socket()
+ * @param[out] buf Message buffer
+ * @param[in] len Message buffer size
+ * @param[in] flags Message flags
+ *
+ * @return Number of bytes received, otherwise negative error code
+ *
+ * Action depends on driver implementation, see @ref rtdm_profiles
+ * "Device Profiles".
+ *
+ * @see @c recv() in IEEE Std 1003.1,
+ * http://www.opengroup.org/onlinepubs/009695399
+ *
+ * @coretags{mode-unrestricted, might-switch}
+ */
+ssize_t rtdm_recv(int fd, void *buf, size_t len, int flags);
+
+/**
+ * @brief Transmit message to socket
+ *
+ * @param[in] fd File descriptor as returned by rtdm_socket()
+ * @param[in] msg Message descriptor
+ * @param[in] flags Message flags
+ *
+ * @return Number of bytes sent, otherwise negative error code
+ *
+ * Action depends on driver implementation, see @ref rtdm_profiles
+ * "Device Profiles".
+ *
+ * @see @c sendmsg() in IEEE Std 1003.1,
+ * http://www.opengroup.org/onlinepubs/009695399
+ *
+ * @coretags{mode-unrestricted, might-switch}
+ */
+ssize_t rtdm_sendmsg(int fd, const struct user_msghdr *msg, int flags);
+
+/**
+ * @brief Transmit message to socket
+ *
+ * @param[in] fd File descriptor as returned by rtdm_socket()
+ * @param[in] buf Message buffer
+ * @param[in] len Message buffer size
+ * @param[in] flags Message flags
+ * @param[in] to Buffer for message destination address
+ * @param[in] tolen Address buffer size
+ *
+ * @return Number of bytes sent, otherwise negative error code
+ *
+ * Action depends on driver implementation, see @ref rtdm_profiles
+ * "Device Profiles".
+ *
+ * @see @c sendto() in IEEE Std 1003.1,
+ * http://www.opengroup.org/onlinepubs/009695399
+ *
+ * @coretags{mode-unrestricted, might-switch}
+ */
+ssize_t rtdm_sendto(int fd, const void *buf, size_t len, int flags,
+		    const struct sockaddr *to, socklen_t tolen);
+
+/**
+ * @brief Transmit message to socket
+ *
+ * @param[in] fd File descriptor as returned by rtdm_socket()
+ * @param[in] buf Message buffer
+ * @param[in] len Message buffer size
+ * @param[in] flags Message flags
+ *
+ * @return Number of bytes sent, otherwise negative error code
+ *
+ * Action depends on driver implementation, see @ref rtdm_profiles
+ * "Device Profiles".
+ *
+ * @see @c send() in IEEE Std 1003.1,
+ * http://www.opengroup.org/onlinepubs/009695399
+ *
+ * @coretags{mode-unrestricted, might-switch}
+ */
+ssize_t rtdm_send(int fd, const void *buf, size_t len, int flags);
+
+/**
+ * @brief Bind to local address
+ *
+ * @param[in] fd File descriptor as returned by rtdm_socket()
+ * @param[in] my_addr Address buffer
+ * @param[in] addrlen Address buffer size
+ *
+ * @return 0 on success, otherwise negative error code
+ *
+ * Action depends on driver implementation, see @ref rtdm_profiles
+ * "Device Profiles".
+ *
+ * @see @c bind() in IEEE Std 1003.1,
+ * http://www.opengroup.org/onlinepubs/009695399
+ *
+ * @coretags{mode-unrestricted, might-switch}
+ */
+int rtdm_bind(int fd, const struct sockaddr *my_addr, socklen_t addrlen);
+
+/**
+ * @brief Connect to remote address
+ *
+ * @param[in] fd File descriptor as returned by rtdm_socket()
+ * @param[in] serv_addr Address buffer
+ * @param[in] addrlen Address buffer size
+ *
+ * @return 0 on success, otherwise negative error code
+ *
+ * Action depends on driver implementation, see @ref rtdm_profiles
+ * "Device Profiles".
+ *
+ * @see @c connect() in IEEE Std 1003.1,
+ * http://www.opengroup.org/onlinepubs/009695399
+ *
+ * @coretags{mode-unrestricted, might-switch}
+ */
+int rtdm_connect(int fd, const struct sockaddr *serv_addr,
+		 socklen_t addrlen);
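+
+/*
+ * Sketch of a typical datagram socket sequence built from the calls
+ * above. The PF_MYPROTO family, struct sockaddr_myproto and the
+ * fill_destination() helper are hypothetical.
+ *
+ *	struct sockaddr_myproto daddr;
+ *	int fd, ret;
+ *
+ *	fd = rtdm_socket(PF_MYPROTO, SOCK_DGRAM, 0);
+ *	if (fd < 0)
+ *		return fd;
+ *
+ *	fill_destination(&daddr);
+ *	ret = rtdm_connect(fd, (struct sockaddr *)&daddr, sizeof(daddr));
+ *	if (ret == 0)
+ *		ret = rtdm_send(fd, "ping", 4, 0);
+ *
+ *	rtdm_close(fd);
+ */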
+
+/**
+ * @brief Listen for incoming connection requests
+ *
+ * @param[in] fd File descriptor as returned by rtdm_socket()
+ * @param[in] backlog Maximum queue length
+ *
+ * @return 0 on success, otherwise negative error code
+ *
+ * Action depends on driver implementation, see @ref rtdm_profiles
+ * "Device Profiles".
+ *
+ * @see @c listen() in IEEE Std 1003.1,
+ * http://www.opengroup.org/onlinepubs/009695399
+ *
+ * @coretags{task-unrestricted, might-switch}
+ */
+int rtdm_listen(int fd, int backlog);
+
+/**
+ * @brief Accept connection requests
+ *
+ * @param[in] fd File descriptor as returned by rtdm_socket()
+ * @param[out] addr Buffer for remote address
+ * @param[in,out] addrlen Address buffer size
+ *
+ * @return 0 on success, otherwise negative error code
+ *
+ * Action depends on driver implementation, see @ref rtdm_profiles
+ * "Device Profiles".
+ *
+ * @see @c accept() in IEEE Std 1003.1,
+ * http://www.opengroup.org/onlinepubs/009695399
+ *
+ * @coretags{mode-unrestricted, might-switch}
+ */
+int rtdm_accept(int fd, struct sockaddr *addr, socklen_t *addrlen);
+
+/**
+ * @brief Shut down parts of a connection
+ *
+ * @param[in] fd File descriptor as returned by rtdm_socket()
+ * @param[in] how Specifies the part to be shut down (@c SHUT_xxx)
+ *
+ * @return 0 on success, otherwise negative error code
+ *
+ * Action depends on driver implementation, see @ref rtdm_profiles
+ * "Device Profiles".
+ *
+ * @see @c shutdown() in IEEE Std 1003.1,
+ * http://www.opengroup.org/onlinepubs/009695399
+ *
+ * @coretags{secondary-only, might-switch}
+ */
+int rtdm_shutdown(int fd, int how);
+
+/**
+ * @brief Get socket option
+ *
+ * @param[in] fd File descriptor as returned by rtdm_socket()
+ * @param[in] level Addressed stack level
+ * @param[in] optname Option name ID
+ * @param[out] optval Value buffer
+ * @param[in,out] optlen Value buffer size
+ *
+ * @return 0 on success, otherwise negative error code
+ *
+ * Action depends on driver implementation, see @ref rtdm_profiles
+ * "Device Profiles".
+ *
+ * @see @c getsockopt() in IEEE Std 1003.1,
+ * http://www.opengroup.org/onlinepubs/009695399
+ *
+ * @coretags{task-unrestricted, might-switch}
+ */
+int rtdm_getsockopt(int fd, int level, int optname, void *optval,
+		      socklen_t *optlen);
+
+/**
+ * @brief Set socket option
+ *
+ * @param[in] fd File descriptor as returned by rtdm_socket()
+ * @param[in] level Addressed stack level
+ * @param[in] optname Option name ID
+ * @param[in] optval Value buffer
+ * @param[in] optlen Value buffer size
+ *
+ * @return 0 on success, otherwise negative error code
+ *
+ * Action depends on driver implementation, see @ref rtdm_profiles
+ * "Device Profiles".
+ *
+ * @see @c setsockopt() in IEEE Std 1003.1,
+ * http://www.opengroup.org/onlinepubs/009695399
+ *
+ * @coretags{task-unrestricted, might-switch}
+ */
+int rtdm_setsockopt(int fd, int level, int optname, const void *optval,
+		    socklen_t optlen);
+
+/**
+ * @brief Get local socket address
+ *
+ * @param[in] fd File descriptor as returned by rtdm_socket()
+ * @param[out] name Address buffer
+ * @param[in,out] namelen Address buffer size
+ *
+ * @return 0 on success, otherwise negative error code
+ *
+ * Action depends on driver implementation, see @ref rtdm_profiles
+ * "Device Profiles".
+ *
+ * @see @c getsockname() in IEEE Std 1003.1,
+ * http://www.opengroup.org/onlinepubs/009695399
+ *
+ * @coretags{task-unrestricted, might-switch}
+ */
+int rtdm_getsockname(int fd, struct sockaddr *name, socklen_t *namelen);
+
+/**
+ * @brief Get socket destination address
+ *
+ * @param[in] fd File descriptor as returned by rtdm_socket()
+ * @param[out] name Address buffer
+ * @param[in,out] namelen Address buffer size
+ *
+ * @return 0 on success, otherwise negative error code
+ *
+ * Action depends on driver implementation, see @ref rtdm_profiles
+ * "Device Profiles".
+ *
+ * @see @c getpeername() in IEEE Std 1003.1,
+ * http://www.opengroup.org/onlinepubs/009695399
+ *
+ * @coretags{task-unrestricted, might-switch}
+ */
+int rtdm_getpeername(int fd, struct sockaddr *name, socklen_t *namelen);
+
+#endif /* DOXYGEN_CPP */
+
+/** @} */
+++ linux-patched/kernel/xenomai/rtdm/drvlib.c	2022-03-21 12:58:29.107891711 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/kernel/xenomai/rtdm/Makefile	1970-01-01 01:00:00.000000000 +0100
+/*
+ * Real-Time Driver Model for Xenomai, driver library
+ *
+ * Copyright (C) 2005-2007 Jan Kiszka <jan.kiszka@web.de>
+ * Copyright (C) 2005 Joerg Langenberg <joerg.langenberg@gmx.net>
+ * Copyright (C) 2008 Gilles Chanteperdrix <gilles.chanteperdrix@xenomai.org>
+ * Copyright (C) 2014 Philippe Gerum <rpm@xenomai.org>
+ *
+ * Xenomai is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * Xenomai is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Xenomai; if not, write to the Free Software Foundation,
+ * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+#include <linux/bitops.h>
+#include <linux/delay.h>
+#include <linux/mman.h>
+#include <asm/page.h>
+#include <asm/io.h>
+#include <asm/pgtable.h>
+#include <linux/highmem.h>
+#include <linux/err.h>
+#include <linux/anon_inodes.h>
+#include <rtdm/driver.h>
+#include <rtdm/compat.h>
+#include <pipeline/inband_work.h>
+#include "internal.h"
+#include <trace/events/cobalt-rtdm.h>
+
+/**
+ * @ingroup rtdm_driver_interface
+ * @defgroup rtdm_clock Clock Services
+ * @{
+ */
+
+#ifdef DOXYGEN_CPP /* Only used for doxygen doc generation */
+
+/**
+ * @brief Get system time
+ *
+ * @return The system time in nanoseconds is returned
+ *
+ * @note The resolution of this service depends on the system timer. In
+ * particular, if the system timer is running in periodic mode, the return
+ * value will be limited to multiples of the timer tick period.
+ *
+ * @note The system timer may have to be started to obtain valid results.
+ * Whether this happens automatically (as on Xenomai) or is controlled by the
+ * application depends on the RTDM host environment.
+ *
+ * @coretags{unrestricted}
+ */
+nanosecs_abs_t rtdm_clock_read(void);
+
+/**
+ * @brief Get monotonic time
+ *
+ * @return The monotonic time in nanoseconds is returned
+ *
+ * @note The resolution of this service depends on the system timer. In
+ * particular, if the system timer is running in periodic mode, the return
+ * value will be limited to multiples of the timer tick period.
+ *
+ * @note The system timer may have to be started to obtain valid results.
+ * Whether this happens automatically (as on Xenomai) or is controlled by the
+ * application depends on the RTDM host environment.
+ *
+ * @coretags{unrestricted}
+ */
+nanosecs_abs_t rtdm_clock_read_monotonic(void);
+#endif /* DOXYGEN_CPP */
+/** @} */
+
+/**
+ * @ingroup rtdm_driver_interface
+ * @defgroup rtdm_task Task Services
+ * @{
+ */
+
+/**
+ * @brief Initialise and start a real-time task
+ *
+ * After initialising a task, the task handle remains valid and can be
+ * passed to RTDM services until either rtdm_task_destroy() or
+ * rtdm_task_join() has been invoked.
+ *
+ * @param[in,out] task Task handle
+ * @param[in] name Optional task name
+ * @param[in] task_proc Procedure to be executed by the task
+ * @param[in] arg Custom argument passed to @c task_proc() on entry
+ * @param[in] priority Priority of the task, see also
+ * @ref rtdmtaskprio "Task Priority Range"
+ * @param[in] period Period in nanoseconds of a cyclic task, 0 for non-cyclic
+ * mode. Waiting for the first and subsequent periodic events is
+ * done using rtdm_task_wait_period().
+ *
+ * @return 0 on success, otherwise negative error code
+ *
+ * @coretags{secondary-only, might-switch}
+ */
+int rtdm_task_init(rtdm_task_t *task, const char *name,
+		   rtdm_task_proc_t task_proc, void *arg,
+		   int priority, nanosecs_rel_t period)
+{
+	union xnsched_policy_param param;
+	struct xnthread_start_attr sattr;
+	struct xnthread_init_attr iattr;
+	int err;
+
+	if (!realtime_core_enabled())
+		return -ENOSYS;
+
+	iattr.name = name;
+	iattr.flags = 0;
+	iattr.personality = &xenomai_personality;
+	iattr.affinity = CPU_MASK_ALL;
+	param.rt.prio = priority;
+
+	err = xnthread_init(task, &iattr, &xnsched_class_rt, &param);
+	if (err)
+		return err;
+
+	/* We need an anonymous registry entry to obtain a handle for fast
+	   mutex locking. */
+	err = xnthread_register(task, "");
+	if (err)
+		goto cleanup_out;
+
+	if (period > 0) {
+		err = xnthread_set_periodic(task, XN_INFINITE,
+					    XN_RELATIVE, period);
+		if (err)
+			goto cleanup_out;
+	}
+
+	sattr.mode = 0;
+	sattr.entry = task_proc;
+	sattr.cookie = arg;
+	err = xnthread_start(task, &sattr);
+	if (err)
+		goto cleanup_out;
+
+	return 0;
+
+      cleanup_out:
+	xnthread_cancel(task);
+	return err;
+}
+
+EXPORT_SYMBOL_GPL(rtdm_task_init);
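+
+/*
+ * A possible usage sketch: spawning a 1 ms cyclic acquisition task from
+ * a driver's setup path. struct my_ctx, sample_hardware() and the chosen
+ * priority are hypothetical; see the Task Priority Range section for
+ * valid priority values.
+ *
+ *	static void my_acquisition(void *arg)
+ *	{
+ *		struct my_ctx *ctx = arg;
+ *
+ *		while (!rtdm_task_should_stop()) {
+ *			sample_hardware(ctx);
+ *			rtdm_task_wait_period(NULL);
+ *		}
+ *	}
+ *
+ *	ret = rtdm_task_init(&ctx->task, "acq", my_acquisition, ctx,
+ *			     RTDM_TASK_HIGHEST_PRIORITY, 1000000);
+ */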
+
+#ifdef DOXYGEN_CPP /* Only used for doxygen doc generation */
+/**
+ * @brief Destroy a real-time task
+ *
+ * This call sends a termination request to @a task, then waits for it
+ * to exit. All RTDM tasks should check for pending termination
+ * requests by calling rtdm_task_should_stop() from their work loop.
+ *
+ * If @a task is current, rtdm_task_destroy() terminates the current
+ * context, and does not return to the caller.
+ *
+ * @param[in,out] task Task handle as returned by rtdm_task_init()
+ *
+ * @note Passing the same task handle to RTDM services after the completion of
+ * this function is not allowed.
+ *
+ * @coretags{secondary-only, might-switch}
+ */
+void rtdm_task_destroy(rtdm_task_t *task);
+
+/**
+ * @brief Check for pending termination request
+ *
+ * Check whether a termination request was received by the current
+ * RTDM task. Termination requests are sent by calling
+ * rtdm_task_destroy().
+ *
+ * @return Non-zero indicates that a termination request is pending,
+ * in which case the caller should wrap up and exit.
+ *
+ * @coretags{rtdm-task, might-switch}
+ */
+int rtdm_task_should_stop(void);
+
+/**
+ * @brief Adjust real-time task priority
+ *
+ * @param[in,out] task Task handle as returned by rtdm_task_init()
+ * @param[in] priority New priority of the task, see also
+ * @ref rtdmtaskprio "Task Priority Range"
+ *
+ * @coretags{task-unrestricted, might-switch}
+ */
+void rtdm_task_set_priority(rtdm_task_t *task, int priority);
+
+/**
+ * @brief Adjust real-time task period
+ *
+ * @param[in,out] task Task handle as returned by rtdm_task_init(), or
+ * NULL for referring to the current RTDM task or Cobalt thread.
+ *
+ * @param[in] start_date The initial (absolute) date of the first
+ * release point, expressed in nanoseconds.  @a task will be delayed
+ * by the first call to rtdm_task_wait_period() until this point is
+ * reached. If @a start_date is zero, the first release point is set
+ * to @a period nanoseconds after the current date.
+ *
+ * @param[in] period New period in nanoseconds of a cyclic task, zero
+ * to disable cyclic mode for @a task.
+ *
+ * @coretags{task-unrestricted}
+ */
+int rtdm_task_set_period(rtdm_task_t *task, nanosecs_abs_t start_date,
+			 nanosecs_rel_t period);
+
+/**
+ * @brief Wait on next real-time task period
+ *
+ * @param[in] overruns_r Address of a long word receiving the count of
+ * overruns if -ETIMEDOUT is returned, or NULL if the caller doesn't
+ * need that information.
+ *
+ * @return 0 on success, otherwise:
+ *
+ * - -EINVAL is returned if the calling task is not in periodic mode.
+ *
+ * - -ETIMEDOUT is returned if a timer overrun occurred, which indicates
+ * that a previous release point has been missed by the calling task.
+ *
+ * @coretags{primary-only, might-switch}
+ */
+int rtdm_task_wait_period(unsigned long *overruns_r);
+
+/**
+ * @brief Activate a blocked real-time task
+ *
+ * @return Non-zero is returned if the task was actually unblocked from a
+ * pending wait state, 0 otherwise.
+ *
+ * @coretags{unrestricted, might-switch}
+ */
+int rtdm_task_unblock(rtdm_task_t *task);
+
+/**
+ * @brief Get current real-time task
+ *
+ * @return Pointer to task handle
+ *
+ * @coretags{mode-unrestricted}
+ */
+rtdm_task_t *rtdm_task_current(void);
+
+/**
+ * @brief Sleep a specified amount of time
+ *
+ * @param[in] delay Delay in nanoseconds, see @ref RTDM_TIMEOUT_xxx for
+ * special values.
+ *
+ * @return 0 on success, otherwise:
+ *
+ * - -EINTR is returned if the calling task has been unblocked by a signal or
+ * explicitly via rtdm_task_unblock().
+ *
+ * - -EPERM @e may be returned if an illegal invocation environment is
+ * detected.
+ *
+ * @coretags{primary-only, might-switch}
+ */
+int rtdm_task_sleep(nanosecs_rel_t delay);
+
+/**
+ * @brief Sleep until a specified absolute time
+ *
+ * @deprecated Use rtdm_task_sleep_abs instead!
+ *
+ * @param[in] wakeup_time Absolute timeout in nanoseconds
+ *
+ * @return 0 on success, otherwise:
+ *
+ * - -EINTR is returned if the calling task has been unblocked by a signal or
+ * explicitly via rtdm_task_unblock().
+ *
+ * - -EPERM @e may be returned if an illegal invocation environment is
+ * detected.
+ *
+ * @coretags{primary-only, might-switch}
+ */
+int rtdm_task_sleep_until(nanosecs_abs_t wakeup_time);
+
+/**
+ * @brief Sleep until a specified absolute time
+ *
+ * @param[in] wakeup_time Absolute timeout in nanoseconds
+ * @param[in] mode Selects the timer mode, see RTDM_TIMERMODE_xxx for details
+ *
+ * @return 0 on success, otherwise:
+ *
+ * - -EINTR is returned if the calling task has been unblocked by a signal or
+ * explicitly via rtdm_task_unblock().
+ *
+ * - -EPERM @e may be returned if an illegal invocation environment is
+ * detected.
+ *
+ * - -EINVAL is returned if an invalid parameter was passed.
+ *
+ * @coretags{primary-only, might-switch}
+ */
+int rtdm_task_sleep_abs(nanosecs_abs_t wakeup_time, enum rtdm_timer_mode mode);
+
+/**
+ * @brief Safe busy waiting
+ *
+ * This service alternates active spinning and sleeping within a wait
+ * loop, until a condition is satisfied. While sleeping, a task is
+ * scheduled out and does not consume any CPU time.
+ *
+ * rtdm_task_busy_wait() is particularly useful for waiting for a
+ * state change observed by reading an I/O register, which usually
+ * happens shortly after the wait starts, without incurring the adverse
+ * effects of long busy waiting if it doesn't.
+ *
+ * @param[in] condition The C expression to be tested for detecting
+ * completion.
+ * @param[in] spin_ns The time to spin on @a condition before
+ * sleeping, expressed as a count of nanoseconds.
+ * @param[in] sleep_ns The time to sleep for before spinning again,
+ * expressed as a count of nanoseconds.
+ *
+ * @return 0 on success if @a condition is satisfied, otherwise:
+ *
+ * - -EINTR is returned if the calling task has been unblocked by a
+ * Linux signal or explicitly via rtdm_task_unblock().
+ *
+ * - -EPERM may be returned if an illegal invocation environment is
+ * detected.
+ *
+ * @coretags{primary-only, might-switch}
+ */
+int rtdm_task_busy_wait(bool condition, nanosecs_rel_t spin_ns,
+			nanosecs_rel_t sleep_ns);
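+
+/*
+ * Sketch: polling a hypothetical "ready" bit in a memory-mapped status
+ * register, spinning for 1 us before sleeping 10 us between polls.
+ * ctx->regs, MYDEV_STATUS and MYDEV_READY are hypothetical.
+ *
+ *	ret = rtdm_task_busy_wait(readl(ctx->regs + MYDEV_STATUS) & MYDEV_READY,
+ *				  1000, 10000);
+ */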
+
+/**
+ * @brief Register wait context
+ *
+ * rtdm_wait_prepare() registers a wait context structure for the
+ * caller, which can be later retrieved by a call to
+ * rtdm_wait_get_context(). This call is normally issued before the
+ * current task blocks on a wait object, waiting for some (producer)
+ * code to wake it up. Arbitrary data can be exchanged between both
+ * sites via the wait context structure, which is allocated by the
+ * waiter (consumer) side.
+ *
+ * @a wc is the address of an anchor object which is commonly embedded
+ * into a larger structure with arbitrary contents, which needs to be
+ * shared between the consumer (waiter) and the producer for
+ * implementing the wait code.
+ *
+ * A typical implementation pattern for the wait side is:
+ *
+ * @code
+ * struct rtdm_waitqueue wq;
+ * struct some_wait_context {
+ *    int input_value;
+ *    int output_value;
+ *    struct rtdm_wait_context wc;
+ * } wait_context;
+ *
+ * wait_context.input_value = 42;
+ * rtdm_wait_prepare(&wait_context);
+ * ret = rtdm_wait_condition(&wq, rtdm_wait_is_completed(&wait_context));
+ * if (ret)
+ *     goto wait_failed;
+ * handle_event(wait_context.output_value);
+ * @endcode
+ *
+ * On the producer side, the implementation would look like:
+ *
+ * @code
+ * struct rtdm_waitqueue wq;
+ * struct some_wait_context {
+ *    int input_value;
+ *    int output_value;
+ *    struct rtdm_wait_context wc;
+ * } *wait_context_ptr;
+ * struct rtdm_wait_context *wc;
+ * rtdm_task_t *task;
+ *
+ * rtdm_for_each_waiter(task, &wq) {
+ *    wc = rtdm_wait_get_context(task);
+ *    wait_context_ptr = container_of(wc, struct some_wait_context, wc);
+ *    wait_context_ptr->output_value = 12;
+ * }
+ * rtdm_waitqueue_broadcast(&wq);
+ * @endcode
+ *
+ * @param wc Wait context to register.
+ */
+void rtdm_wait_prepare(struct rtdm_wait_context *wc);
+
+/**
+ * @brief Mark completion for a wait context
+ *
+ * rtdm_complete_wait() marks a wait context as completed, so that
+ * rtdm_wait_is_completed() returns true for such context.
+ *
+ * @param wc Wait context to complete.
+ */
+void rtdm_wait_complete(struct rtdm_wait_context *wc);
+
+/**
+ * @brief Test completion of a wait context
+ *
+ * rtdm_wait_is_completed() returns true if rtdm_complete_wait() was
+ * called for @a wc. The completion mark is reset each time
+ * rtdm_wait_prepare() is called for a wait context.
+ *
+ * @param wc Wait context to check for completion.
+ *
+ * @return non-zero/true if rtdm_wait_complete() was called for @a wc,
+ * zero otherwise.
+ */
+int rtdm_wait_is_completed(struct rtdm_wait_context *wc);
+
+#endif /* DOXYGEN_CPP */
+
+int __rtdm_task_sleep(xnticks_t timeout, xntmode_t mode)
+{
+	struct xnthread *thread;
+
+	if (!XENO_ASSERT(COBALT, !xnsched_unblockable_p()))
+		return -EPERM;
+
+	thread = xnthread_current();
+	xnthread_suspend(thread, XNDELAY, timeout, mode, NULL);
+
+	return xnthread_test_info(thread, XNBREAK) ? -EINTR : 0;
+}
+
+EXPORT_SYMBOL_GPL(__rtdm_task_sleep);
+
+/**
+ * @brief Wait on a real-time task to terminate
+ *
+ * @param[in,out] task Task handle as returned by rtdm_task_init()
+ *
+ * @note Passing the same task handle to RTDM services after the
+ * completion of this function is not allowed.
+ *
+ * @note This service does not trigger the termination of the targeted
+ * task.  The user has to take care of this, otherwise rtdm_task_join()
+ * will never return.
+ *
+ * @coretags{mode-unrestricted}
+ */
+void rtdm_task_join(rtdm_task_t *task)
+{
+	trace_cobalt_driver_task_join(task);
+
+	xnthread_join(task, true);
+}
+
+EXPORT_SYMBOL_GPL(rtdm_task_join);
+
+/**
+ * @brief Busy-wait a specified amount of time
+ *
+ * This service does not schedule out the caller, but rather spins in
+ * a tight loop, burning CPU cycles until the timeout elapses.
+ *
+ * @param[in] delay Delay in nanoseconds. Note that a zero delay does @b not
+ * have the meaning of @c RTDM_TIMEOUT_INFINITE here.
+ *
+ * @note The caller must not be migratable to different CPUs while executing
+ * this service. Otherwise, the actual delay will be undefined.
+ *
+ * @coretags{unrestricted}
+ */
+void rtdm_task_busy_sleep(nanosecs_rel_t delay)
+{
+	xnticks_t wakeup;
+
+	wakeup = xnclock_read_raw(&nkclock) +
+		xnclock_ns_to_ticks(&nkclock, delay);
+
+	while ((xnsticks_t)(xnclock_read_raw(&nkclock) - wakeup) < 0)
+		cpu_relax();
+}
+
+EXPORT_SYMBOL_GPL(rtdm_task_busy_sleep);
+/** @} */
+
+/**
+ * @ingroup rtdm_driver_interface
+ * @defgroup rtdm_timer Timer Services
+ * @{
+ */
+
+/**
+ * @brief Initialise a timer
+ *
+ * @param[in,out] timer Timer handle
+ * @param[in] handler Handler to be called on timer expiry
+ * @param[in] name Optional timer name
+ *
+ * @return 0 on success, otherwise negative error code
+ *
+ * @coretags{task-unrestricted}
+ */
+int rtdm_timer_init(rtdm_timer_t *timer, rtdm_timer_handler_t handler,
+		    const char *name)
+{
+	if (!realtime_core_enabled())
+		return -ENOSYS;
+
+	xntimer_init((timer), &nkclock, handler, NULL, XNTIMER_IGRAVITY);
+	xntimer_set_name((timer), (name));
+	return 0;
+}
+
+EXPORT_SYMBOL_GPL(rtdm_timer_init);
+
+/**
+ * @brief Destroy a timer
+ *
+ * @param[in,out] timer Timer handle as returned by rtdm_timer_init()
+ *
+ * @coretags{task-unrestricted}
+ */
+void rtdm_timer_destroy(rtdm_timer_t *timer)
+{
+	spl_t s;
+
+	xnlock_get_irqsave(&nklock, s);
+	xntimer_destroy(timer);
+	xnlock_put_irqrestore(&nklock, s);
+}
+
+EXPORT_SYMBOL_GPL(rtdm_timer_destroy);
+
+/**
+ * @brief Start a timer
+ *
+ * @param[in,out] timer Timer handle as returned by rtdm_timer_init()
+ * @param[in] expiry Firing time of the timer; @c mode defines whether it is
+ * relative or absolute
+ * @param[in] interval Relative reload value, > 0 if the timer shall work in
+ * periodic mode with the specified interval, 0 for one-shot timers
+ * @param[in] mode Defines the operation mode, see @ref RTDM_TIMERMODE_xxx for
+ * possible values
+ *
+ * @return 0 on success, otherwise:
+ *
+ * - -ETIMEDOUT is returned if @c expiry describes an absolute date in
+ * the past. In such an event, the timer is nevertheless armed for the
+ * next shot in the timeline if @a interval is non-zero.
+ *
+ * @coretags{unrestricted}
+ */
+int rtdm_timer_start(rtdm_timer_t *timer, nanosecs_abs_t expiry,
+		     nanosecs_rel_t interval, enum rtdm_timer_mode mode)
+{
+	spl_t s;
+	int err;
+
+	xnlock_get_irqsave(&nklock, s);
+	err = xntimer_start(timer, expiry, interval, (xntmode_t)mode);
+	xnlock_put_irqrestore(&nklock, s);
+
+	return err;
+}
+
+EXPORT_SYMBOL_GPL(rtdm_timer_start);
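+
+/*
+ * Sketch: arming a periodic timer firing every 500 us, with the first
+ * shot one period from now. my_timer_handler is a hypothetical
+ * rtdm_timer_handler_t, ctx is a hypothetical driver context.
+ *
+ *	ret = rtdm_timer_init(&ctx->timer, my_timer_handler, "mydrv-timer");
+ *	if (ret == 0)
+ *		ret = rtdm_timer_start(&ctx->timer, 500000, 500000,
+ *				       RTDM_TIMERMODE_RELATIVE);
+ */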
+
+/**
+ * @brief Stop a timer
+ *
+ * @param[in,out] timer Timer handle as returned by rtdm_timer_init()
+ *
+ * @coretags{unrestricted}
+ */
+void rtdm_timer_stop(rtdm_timer_t *timer)
+{
+	spl_t s;
+
+	xnlock_get_irqsave(&nklock, s);
+	xntimer_stop(timer);
+	xnlock_put_irqrestore(&nklock, s);
+}
+
+EXPORT_SYMBOL_GPL(rtdm_timer_stop);
+
+#ifdef DOXYGEN_CPP /* Only used for doxygen doc generation */
+/**
+ * @brief Start a timer from inside a timer handler
+ *
+ * @param[in,out] timer Timer handle as returned by rtdm_timer_init()
+ * @param[in] expiry Firing time of the timer; @c mode defines whether it is
+ * relative or absolute
+ * @param[in] interval Relative reload value, > 0 if the timer shall work in
+ * periodic mode with the specified interval, 0 for one-shot timers
+ * @param[in] mode Defines the operation mode, see @ref RTDM_TIMERMODE_xxx for
+ * possible values
+ *
+ * @return 0 on success, otherwise:
+ *
+ * - -ETIMEDOUT is returned if @c expiry describes an absolute date in the
+ * past.
+ *
+ * @coretags{coreirq-only}
+ */
+int rtdm_timer_start_in_handler(rtdm_timer_t *timer, nanosecs_abs_t expiry,
+				nanosecs_rel_t interval,
+				enum rtdm_timer_mode mode);
+
+/**
+ * @brief Stop a timer from inside a timer handler
+ *
+ * @param[in,out] timer Timer handle as returned by rtdm_timer_init()
+ *
+ * @coretags{coreirq-only}
+ */
+void rtdm_timer_stop_in_handler(rtdm_timer_t *timer);
+#endif /* DOXYGEN_CPP */
+/** @} */
+
+/* --- IPC cleanup helper --- */
+
+#define RTDM_SYNCH_DELETED          XNSYNCH_SPARE0
+
+void __rtdm_synch_flush(struct xnsynch *synch, unsigned long reason)
+{
+	spl_t s;
+
+	xnlock_get_irqsave(&nklock, s);
+
+	if (reason == XNRMID)
+		xnsynch_set_status(synch, RTDM_SYNCH_DELETED);
+
+	if (likely(xnsynch_flush(synch, reason) == XNSYNCH_RESCHED))
+		xnsched_run();
+
+	xnlock_put_irqrestore(&nklock, s);
+}
+
+EXPORT_SYMBOL_GPL(__rtdm_synch_flush);
+
+/**
+ * @ingroup rtdm_driver_interface
+ * @defgroup rtdm_sync Synchronisation Services
+ * @{
+ */
+
+/*!
+ * @name Timeout Sequence Management
+ * @{
+ */
+
+/**
+ * @brief Initialise a timeout sequence
+ *
+ * This service initialises a timeout sequence handle according to the given
+ * timeout value. Timeout sequences make it possible to maintain a continuous @a timeout
+ * across multiple calls of blocking synchronisation services. A typical
+ * application scenario is given below.
+ *
+ * @param[in,out] timeout_seq Timeout sequence handle
+ * @param[in] timeout Relative timeout in nanoseconds, see
+ * @ref RTDM_TIMEOUT_xxx for special values
+ *
+ * Application Scenario:
+ * @code
+int device_service_routine(...)
+{
+	rtdm_toseq_t timeout_seq;
+	...
+
+	rtdm_toseq_init(&timeout_seq, timeout);
+	...
+	while (received < requested) {
+		ret = rtdm_event_timedwait(&data_available, timeout, &timeout_seq);
+		if (ret < 0) // including -ETIMEDOUT
+			break;
+
+		// receive some data
+		...
+	}
+	...
+}
+ * @endcode
+ * Using a timeout sequence in such a scenario prevents the user-provided
+ * relative @c timeout from being restarted on every call to
+ * rtdm_event_timedwait(), which could otherwise cause an overall delay larger
+ * than specified by @c timeout. Moreover, all functions supporting timeout
+ * sequences also interpret special timeout values (infinite and non-blocking),
+ * relieving the driver developer of handling them separately.
+ *
+ * @coretags{task-unrestricted}
+ */
+void rtdm_toseq_init(rtdm_toseq_t *timeout_seq, nanosecs_rel_t timeout)
+{
+	XENO_WARN_ON(COBALT, xnsched_unblockable_p()); /* only warn here */
+
+	*timeout_seq = xnclock_read_monotonic(&nkclock) + timeout;
+}
+
+EXPORT_SYMBOL_GPL(rtdm_toseq_init);
+
+/** @} */
+
+/**
+ * @ingroup rtdm_sync
+ * @defgroup rtdm_sync_event Event Services
+ * @{
+ */
+
+/**
+ * @brief Initialise an event
+ *
+ * @param[in,out] event Event handle
+ * @param[in] pending Non-zero if event shall be initialised as set, 0 otherwise
+ *
+ * @coretags{task-unrestricted}
+ */
+void rtdm_event_init(rtdm_event_t *event, unsigned long pending)
+{
+	spl_t s;
+
+	trace_cobalt_driver_event_init(event, pending);
+
+	/* Make atomic for re-initialisation support */
+	xnlock_get_irqsave(&nklock, s);
+
+	xnsynch_init(&event->synch_base, XNSYNCH_PRIO, NULL);
+	if (pending)
+		xnsynch_set_status(&event->synch_base, RTDM_EVENT_PENDING);
+	xnselect_init(&event->select_block);
+
+	xnlock_put_irqrestore(&nklock, s);
+}
+
+EXPORT_SYMBOL_GPL(rtdm_event_init);
+
+/**
+ * @brief Destroy an event
+ *
+ * @param[in,out] event Event handle as returned by rtdm_event_init()
+ *
+ * @coretags{task-unrestricted, might-switch}
+ */
+void rtdm_event_destroy(rtdm_event_t *event)
+{
+	trace_cobalt_driver_event_destroy(event);
+	if (realtime_core_enabled()) {
+		__rtdm_synch_flush(&event->synch_base, XNRMID);
+		xnselect_destroy(&event->select_block);
+	}
+}
+EXPORT_SYMBOL_GPL(rtdm_event_destroy);
+
+/**
+ * @brief Signal an event occurrence to currently listening waiters
+ *
+ * This function wakes up all current waiters of the given event, but it does
+ * not change the event state. Subsequent callers of rtdm_event_wait() or
+ * rtdm_event_timedwait() will therefore be blocked first.
+ *
+ * @param[in,out] event Event handle as returned by rtdm_event_init()
+ *
+ * @coretags{unrestricted, might-switch}
+ */
+void rtdm_event_pulse(rtdm_event_t *event)
+{
+	trace_cobalt_driver_event_pulse(event);
+	__rtdm_synch_flush(&event->synch_base, 0);
+}
+EXPORT_SYMBOL_GPL(rtdm_event_pulse);
+
+/**
+ * @brief Signal an event occurrence
+ *
+ * This function sets the given event and wakes up all current waiters. If no
+ * waiter is presently registered, the next call to rtdm_event_wait() or
+ * rtdm_event_timedwait() will return immediately.
+ *
+ * @param[in,out] event Event handle as returned by rtdm_event_init()
+ *
+ * @coretags{unrestricted, might-switch}
+ */
+void rtdm_event_signal(rtdm_event_t *event)
+{
+	int resched = 0;
+	spl_t s;
+
+	trace_cobalt_driver_event_signal(event);
+
+	xnlock_get_irqsave(&nklock, s);
+
+	xnsynch_set_status(&event->synch_base, RTDM_EVENT_PENDING);
+	if (xnsynch_flush(&event->synch_base, 0))
+		resched = 1;
+	if (xnselect_signal(&event->select_block, 1))
+		resched = 1;
+	if (resched)
+		xnsched_run();
+
+	xnlock_put_irqrestore(&nklock, s);
+}
+
+EXPORT_SYMBOL_GPL(rtdm_event_signal);
+
+/**
+ * @brief Wait on event occurrence
+ *
+ * This is the light-weight version of rtdm_event_timedwait(), implying an
+ * infinite timeout.
+ *
+ * @param[in,out] event Event handle as returned by rtdm_event_init()
+ *
+ * @return 0 on success, otherwise:
+ *
+ * - -EINTR is returned if the calling task has been unblocked by a signal or
+ * explicitly via rtdm_task_unblock().
+ *
+ * - -EIDRM is returned if @a event has been destroyed.
+ *
+ * - -EPERM @e may be returned if an illegal invocation environment is
+ * detected.
+ *
+ * @coretags{primary-only, might-switch}
+ */
+int rtdm_event_wait(rtdm_event_t *event)
+{
+	return rtdm_event_timedwait(event, 0, NULL);
+}
+
+EXPORT_SYMBOL_GPL(rtdm_event_wait);
+
+/**
+ * @brief Wait on event occurrence with timeout
+ *
+ * This function waits or tests for the occurrence of the given event, taking
+ * the provided timeout into account. On successful return, the event is
+ * reset.
+ *
+ * @param[in,out] event Event handle as returned by rtdm_event_init()
+ * @param[in] timeout Relative timeout in nanoseconds, see
+ * @ref RTDM_TIMEOUT_xxx for special values
+ * @param[in,out] timeout_seq Handle of a timeout sequence as returned by
+ * rtdm_toseq_init() or NULL
+ *
+ * @return 0 on success, otherwise:
+ *
+ * - -ETIMEDOUT is returned if the request has not been satisfied
+ * within the specified amount of time.
+ *
+ * - -EINTR is returned if the calling task has been unblocked by a signal or
+ * explicitly via rtdm_task_unblock().
+ *
+ * - -EIDRM is returned if @a event has been destroyed.
+ *
+ * - -EPERM @e may be returned if an illegal invocation environment is
+ * detected.
+ *
+ * - -EWOULDBLOCK is returned if a negative @a timeout (i.e., non-blocking
+ * operation) has been specified.
+ *
+ * @coretags{primary-timed, might-switch}
+ */
+int rtdm_event_timedwait(rtdm_event_t *event, nanosecs_rel_t timeout,
+			 rtdm_toseq_t *timeout_seq)
+{
+	struct xnthread *thread;
+	int err = 0, ret;
+	spl_t s;
+
+	if (!XENO_ASSERT(COBALT, timeout < 0 || !xnsched_unblockable_p()))
+		return -EPERM;
+
+	trace_cobalt_driver_event_wait(event, xnthread_current());
+
+	xnlock_get_irqsave(&nklock, s);
+
+	if (unlikely(event->synch_base.status & RTDM_SYNCH_DELETED))
+		err = -EIDRM;
+	else if (likely(event->synch_base.status & RTDM_EVENT_PENDING)) {
+		xnsynch_clear_status(&event->synch_base, RTDM_EVENT_PENDING);
+		xnselect_signal(&event->select_block, 0);
+	} else {
+		/* non-blocking mode */
+		if (timeout < 0) {
+			err = -EWOULDBLOCK;
+			goto unlock_out;
+		}
+
+		thread = xnthread_current();
+
+		if (timeout_seq && (timeout > 0))
+			/* timeout sequence */
+			ret = xnsynch_sleep_on(&event->synch_base, *timeout_seq,
+					       XN_ABSOLUTE);
+		else
+			/* infinite or relative timeout */
+			ret = xnsynch_sleep_on(&event->synch_base, timeout, XN_RELATIVE);
+
+		if (likely(ret == 0)) {
+			xnsynch_clear_status(&event->synch_base,
+					    RTDM_EVENT_PENDING);
+			xnselect_signal(&event->select_block, 0);
+		} else if (ret & XNTIMEO)
+			err = -ETIMEDOUT;
+		else if (ret & XNRMID)
+			err = -EIDRM;
+		else /* XNBREAK */
+			err = -EINTR;
+	}
+
+unlock_out:
+	xnlock_put_irqrestore(&nklock, s);
+
+	return err;
+}
+
+EXPORT_SYMBOL_GPL(rtdm_event_timedwait);
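+
+/*
+ * Sketch of the typical producer/consumer pairing: the interrupt handler
+ * signals data availability, the read handler waits for it under a
+ * timeout sequence. ctx, its data_ready event and copy_out_data() are
+ * hypothetical.
+ *
+ * In the interrupt handler:
+ *
+ *	rtdm_event_signal(&ctx->data_ready);
+ *
+ * In the read handler:
+ *
+ *	rtdm_toseq_init(&toseq, timeout);
+ *	ret = rtdm_event_timedwait(&ctx->data_ready, timeout, &toseq);
+ *	if (ret == 0)
+ *		copy_out_data(ctx);
+ */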
+
+/**
+ * @brief Clear event state
+ *
+ * @param[in,out] event Event handle as returned by rtdm_event_init()
+ *
+ * @coretags{unrestricted}
+ */
+void rtdm_event_clear(rtdm_event_t *event)
+{
+	spl_t s;
+
+	trace_cobalt_driver_event_clear(event);
+
+	xnlock_get_irqsave(&nklock, s);
+
+	xnsynch_clear_status(&event->synch_base, RTDM_EVENT_PENDING);
+	xnselect_signal(&event->select_block, 0);
+
+	xnlock_put_irqrestore(&nklock, s);
+}
+
+EXPORT_SYMBOL_GPL(rtdm_event_clear);
+
+/**
+ * @brief Bind a selector to an event
+ *
+ * This function binds the given selector to an event so that the former is
+ * notified when the event state changes. Typically the select binding handler
+ * will invoke this service.
+ *
+ * @param[in,out] event Event handle as returned by rtdm_event_init()
+ * @param[in,out] selector Selector as passed to the select binding handler
+ * @param[in] type Type of the bound event as passed to the select binding handler
+ * @param[in] fd_index File descriptor index as passed to the select binding
+ * handler
+ *
+ * @return 0 on success, otherwise:
+ *
+ * - -ENOMEM is returned if there is insufficient memory to establish the
+ * dynamic binding.
+ *
+ * - -EINVAL is returned if @a type or @a fd_index are invalid.
+ *
+ * @coretags{task-unrestricted}
+ */
+int rtdm_event_select(rtdm_event_t *event, rtdm_selector_t *selector,
+		      enum rtdm_selecttype type, unsigned int fd_index)
+{
+	struct xnselect_binding *binding;
+	int err;
+	spl_t s;
+
+	binding = xnmalloc(sizeof(*binding));
+	if (!binding)
+		return -ENOMEM;
+
+	xnlock_get_irqsave(&nklock, s);
+	err = xnselect_bind(&event->select_block,
+			    binding, selector, type, fd_index,
+			    event->synch_base.status & (RTDM_SYNCH_DELETED |
+						       RTDM_EVENT_PENDING));
+	xnlock_put_irqrestore(&nklock, s);
+
+	if (err)
+		xnfree(binding);
+
+	return err;
+}
+EXPORT_SYMBOL_GPL(rtdm_event_select);
+
+/** @} */
+
+/**
+ * @ingroup rtdm_sync
+ * @defgroup rtdm_sync_sem Semaphore Services
+ * @{
+ */
+
+/**
+ * @brief Initialise a semaphore
+ *
+ * @param[in,out] sem Semaphore handle
+ * @param[in] value Initial value of the semaphore
+ *
+ * @coretags{task-unrestricted}
+ */
+void rtdm_sem_init(rtdm_sem_t *sem, unsigned long value)
+{
+	spl_t s;
+
+	trace_cobalt_driver_sem_init(sem, value);
+
+	/* Make atomic for re-initialisation support */
+	xnlock_get_irqsave(&nklock, s);
+
+	sem->value = value;
+	xnsynch_init(&sem->synch_base, XNSYNCH_PRIO, NULL);
+	xnselect_init(&sem->select_block);
+
+	xnlock_put_irqrestore(&nklock, s);
+}
+
+EXPORT_SYMBOL_GPL(rtdm_sem_init);
+
+/**
+ * @brief Destroy a semaphore
+ *
+ * @param[in,out] sem Semaphore handle as returned by rtdm_sem_init()
+ *
+ * @coretags{task-unrestricted, might-switch}
+ */
+void rtdm_sem_destroy(rtdm_sem_t *sem)
+{
+	trace_cobalt_driver_sem_destroy(sem);
+	if (realtime_core_enabled()) {
+		__rtdm_synch_flush(&sem->synch_base, XNRMID);
+		xnselect_destroy(&sem->select_block);
+	}
+}
+EXPORT_SYMBOL_GPL(rtdm_sem_destroy);
+
+/**
+ * @brief Decrement a semaphore
+ *
+ * This is the light-weight version of rtdm_sem_timeddown(), implying an
+ * infinite timeout.
+ *
+ * @param[in,out] sem Semaphore handle as returned by rtdm_sem_init()
+ *
+ * @return 0 on success, otherwise:
+ *
+ * - -EINTR is returned if the calling task has been unblocked by a signal or
+ * explicitly via rtdm_task_unblock().
+ *
+ * - -EIDRM is returned if @a sem has been destroyed.
+ *
+ * - -EPERM @e may be returned if an illegal invocation environment is
+ * detected.
+ *
+ * @coretags{primary-only, might-switch}
+ */
+int rtdm_sem_down(rtdm_sem_t *sem)
+{
+	return rtdm_sem_timeddown(sem, 0, NULL);
+}
+
+EXPORT_SYMBOL_GPL(rtdm_sem_down);
+
+/**
+ * @brief Decrement a semaphore with timeout
+ *
+ * This function tries to decrement the given semaphore's value if it is
+ * positive on entry. If not, the caller is blocked unless non-blocking
+ * operation was selected.
+ *
+ * @param[in,out] sem Semaphore handle as returned by rtdm_sem_init()
+ * @param[in] timeout Relative timeout in nanoseconds, see
+ * @ref RTDM_TIMEOUT_xxx for special values
+ * @param[in,out] timeout_seq Handle of a timeout sequence as returned by
+ * rtdm_toseq_init() or NULL
+ *
+ * @return 0 on success, otherwise:
+ *
+ * - -ETIMEDOUT is returned if the request has not been satisfied
+ * within the specified amount of time.
+ *
+ * - -EWOULDBLOCK is returned if @a timeout is negative and the semaphore
+ * value is currently not positive.
+ *
+ * - -EINTR is returned if the calling task has been unblocked by a signal or
+ * explicitly via rtdm_task_unblock().
+ *
+ * - -EIDRM is returned if @a sem has been destroyed.
+ *
+ * - -EPERM @e may be returned if an illegal invocation environment is
+ * detected.
+ *
+ * @coretags{primary-timed, might-switch}
+ */
+int rtdm_sem_timeddown(rtdm_sem_t *sem, nanosecs_rel_t timeout,
+		       rtdm_toseq_t *timeout_seq)
+{
+	struct xnthread *thread;
+	int err = 0, ret;
+	spl_t s;
+
+	if (!XENO_ASSERT(COBALT, timeout < 0 || !xnsched_unblockable_p()))
+		return -EPERM;
+
+	trace_cobalt_driver_sem_wait(sem, xnthread_current());
+
+	xnlock_get_irqsave(&nklock, s);
+
+	if (unlikely(sem->synch_base.status & RTDM_SYNCH_DELETED))
+		err = -EIDRM;
+	else if (sem->value > 0) {
+		if(!--sem->value)
+			xnselect_signal(&sem->select_block, 0);
+	} else if (timeout < 0) /* non-blocking mode */
+		err = -EWOULDBLOCK;
+	else {
+		thread = xnthread_current();
+
+		if (timeout_seq && timeout > 0)
+			/* timeout sequence */
+			ret = xnsynch_sleep_on(&sem->synch_base, *timeout_seq,
+					       XN_ABSOLUTE);
+		else
+			/* infinite or relative timeout */
+			ret = xnsynch_sleep_on(&sem->synch_base, timeout, XN_RELATIVE);
+
+		if (ret) {
+			if (ret & XNTIMEO)
+				err = -ETIMEDOUT;
+			else if (ret & XNRMID)
+				err = -EIDRM;
+			else /* XNBREAK */
+				err = -EINTR;
+		}
+	}
+
+	xnlock_put_irqrestore(&nklock, s);
+
+	return err;
+}
+
+EXPORT_SYMBOL_GPL(rtdm_sem_timeddown);
+
+/**
+ * @brief Increment a semaphore
+ *
+ * This function increments the given semaphore's value, waking up a potential
+ * waiter which was blocked upon rtdm_sem_down().
+ *
+ * @param[in,out] sem Semaphore handle as returned by rtdm_sem_init()
+ *
+ * @coretags{unrestricted, might-switch}
+ */
+void rtdm_sem_up(rtdm_sem_t *sem)
+{
+	spl_t s;
+
+	trace_cobalt_driver_sem_up(sem);
+
+	xnlock_get_irqsave(&nklock, s);
+
+	if (xnsynch_wakeup_one_sleeper(&sem->synch_base))
+		xnsched_run();
+	else
+		if (sem->value++ == 0
+		    && xnselect_signal(&sem->select_block, 1))
+			xnsched_run();
+
+	xnlock_put_irqrestore(&nklock, s);
+}
+
+EXPORT_SYMBOL_GPL(rtdm_sem_up);
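+
+/*
+ * Sketch: counting received frames of a hypothetical RX ring with a
+ * semaphore. ctx, its rx_sem member and dequeue_frame() are hypothetical.
+ *
+ * At open time:
+ *
+ *	rtdm_sem_init(&ctx->rx_sem, 0);
+ *
+ * In the interrupt handler, once per received frame:
+ *
+ *	rtdm_sem_up(&ctx->rx_sem);
+ *
+ * In the read handler:
+ *
+ *	ret = rtdm_sem_timeddown(&ctx->rx_sem, timeout, NULL);
+ *	if (ret == 0)
+ *		frame = dequeue_frame(ctx);
+ */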
+
+/**
+ * @brief Bind a selector to a semaphore
+ *
+ * This function binds the given selector to the semaphore so that the former
+ * is notified when the semaphore state changes. Typically the select binding
+ * handler will invoke this service.
+ *
+ * @param[in,out] sem Semaphore handle as returned by rtdm_sem_init()
+ * @param[in,out] selector Selector as passed to the select binding handler
+ * @param[in] type Type of the bound event as passed to the select binding handler
+ * @param[in] fd_index File descriptor index as passed to the select binding
+ * handler
+ *
+ * @return 0 on success, otherwise:
+ *
+ * - -ENOMEM is returned if there is insufficient memory to establish the
+ * dynamic binding.
+ *
+ * - -EINVAL is returned if @a type or @a fd_index are invalid.
+ *
+ * @coretags{task-unrestricted}
+ */
+int rtdm_sem_select(rtdm_sem_t *sem, rtdm_selector_t *selector,
+		    enum rtdm_selecttype type, unsigned int fd_index)
+{
+	struct xnselect_binding *binding;
+	int err;
+	spl_t s;
+
+	binding = xnmalloc(sizeof(*binding));
+	if (!binding)
+		return -ENOMEM;
+
+	xnlock_get_irqsave(&nklock, s);
+	err = xnselect_bind(&sem->select_block, binding, selector,
+			    type, fd_index,
+			    (sem->value > 0) ||
+			    sem->synch_base.status & RTDM_SYNCH_DELETED);
+	xnlock_put_irqrestore(&nklock, s);
+
+	if (err)
+		xnfree(binding);
+
+	return err;
+}
+EXPORT_SYMBOL_GPL(rtdm_sem_select);
+
+/** @} */
+
+/**
+ * @ingroup rtdm_sync
+ * @defgroup rtdm_sync_mutex Mutex services
+ * @{
+ */
+
+/**
+ * @brief Initialise a mutex
+ *
+ * This function initialises a basic mutex with priority inversion protection.
+ * "Basic", as it does not allow a mutex owner to recursively lock the same
+ * mutex again.
+ *
+ * @param[in,out] mutex Mutex handle
+ *
+ * @coretags{task-unrestricted}
+ */
+void rtdm_mutex_init(rtdm_mutex_t *mutex)
+{
+	spl_t s;
+
+	/* Make atomic for re-initialisation support */
+	xnlock_get_irqsave(&nklock, s);
+	xnsynch_init(&mutex->synch_base, XNSYNCH_PI, &mutex->fastlock);
+	xnlock_put_irqrestore(&nklock, s);
+}
+EXPORT_SYMBOL_GPL(rtdm_mutex_init);
+
+/**
+ * @brief Destroy a mutex
+ *
+ * @param[in,out] mutex Mutex handle as returned by rtdm_mutex_init()
+ *
+ * @coretags{task-unrestricted, might-switch}
+ */
+void rtdm_mutex_destroy(rtdm_mutex_t *mutex)
+{
+	trace_cobalt_driver_mutex_destroy(mutex);
+
+	if (realtime_core_enabled())
+		__rtdm_synch_flush(&mutex->synch_base, XNRMID);
+}
+EXPORT_SYMBOL_GPL(rtdm_mutex_destroy);
+
+/**
+ * @brief Release a mutex
+ *
+ * This function releases the given mutex, waking up a potential waiter which
+ * was blocked upon rtdm_mutex_lock() or rtdm_mutex_timedlock().
+ *
+ * @param[in,out] mutex Mutex handle as returned by rtdm_mutex_init()
+ *
+ * @coretags{primary-only, might-switch}
+ */
+void rtdm_mutex_unlock(rtdm_mutex_t *mutex)
+{
+	if (!XENO_ASSERT(COBALT, !xnsched_interrupt_p()))
+		return;
+
+	trace_cobalt_driver_mutex_release(mutex);
+
+	if (unlikely(xnsynch_release(&mutex->synch_base,
+				     xnsched_current_thread())))
+		xnsched_run();
+}
+EXPORT_SYMBOL_GPL(rtdm_mutex_unlock);
+
+/**
+ * @brief Request a mutex
+ *
+ * This is the light-weight version of rtdm_mutex_timedlock(), implying an
+ * infinite timeout.
+ *
+ * @param[in,out] mutex Mutex handle as returned by rtdm_mutex_init()
+ *
+ * @return 0 on success, otherwise:
+ *
+ * - -EIDRM is returned if @a mutex has been destroyed.
+ *
+ * - -EPERM @e may be returned if an illegal invocation environment is
+ * detected.
+ *
+ * @coretags{primary-only, might-switch}
+ */
+int rtdm_mutex_lock(rtdm_mutex_t *mutex)
+{
+	return rtdm_mutex_timedlock(mutex, 0, NULL);
+}
+
+EXPORT_SYMBOL_GPL(rtdm_mutex_lock);
+
+/**
+ * @brief Request a mutex with timeout
+ *
+ * This function tries to acquire the given mutex. If it is not available, the
+ * caller is blocked unless non-blocking operation was selected.
+ *
+ * @param[in,out] mutex Mutex handle as returned by rtdm_mutex_init()
+ * @param[in] timeout Relative timeout in nanoseconds, see
+ * @ref RTDM_TIMEOUT_xxx for special values
+ * @param[in,out] timeout_seq Handle of a timeout sequence as returned by
+ * rtdm_toseq_init() or NULL
+ *
+ * @return 0 on success, otherwise:
+ *
+ * - -ETIMEDOUT is returned if the request has not been satisfied
+ * within the specified amount of time.
+ *
+ * - -EWOULDBLOCK is returned if @a timeout is negative and the mutex is
+ * not immediately available.
+ *
+ * - -EIDRM is returned if @a mutex has been destroyed.
+ *
+ * - -EPERM @e may be returned if an illegal invocation environment is
+ * detected.
+ *
+ * @coretags{primary-only, might-switch}
+ */
+int rtdm_mutex_timedlock(rtdm_mutex_t *mutex, nanosecs_rel_t timeout,
+			 rtdm_toseq_t *timeout_seq)
+{
+	struct xnthread *curr;
+	int ret;
+	spl_t s;
+
+	if (!XENO_ASSERT(COBALT, !xnsched_unblockable_p()))
+		return -EPERM;
+
+	curr = xnthread_current();
+	trace_cobalt_driver_mutex_wait(mutex, curr);
+
+	xnlock_get_irqsave(&nklock, s);
+
+	if (unlikely(mutex->synch_base.status & RTDM_SYNCH_DELETED)) {
+		ret = -EIDRM;
+		goto out;
+	}
+
+	ret = xnsynch_try_acquire(&mutex->synch_base);
+	if (ret != -EBUSY)
+		goto out;
+
+	if (timeout < 0) {
+		ret = -EWOULDBLOCK;
+		goto out;
+	}
+
+	for (;;) {
+		if (timeout_seq && timeout > 0) /* timeout sequence */
+			ret = xnsynch_acquire(&mutex->synch_base, *timeout_seq,
+					      XN_ABSOLUTE);
+		else		/* infinite or relative timeout */
+			ret = xnsynch_acquire(&mutex->synch_base, timeout,
+					      XN_RELATIVE);
+		if (ret == 0)
+			break;
+		if (ret & XNBREAK)
+			continue;
+		ret = ret & XNTIMEO ? -ETIMEDOUT : -EIDRM;
+		break;
+	}
+out:
+	xnlock_put_irqrestore(&nklock, s);
+
+	return ret;
+}
+
+EXPORT_SYMBOL_GPL(rtdm_mutex_timedlock);
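+
+/*
+ * Sketch: serialising access to shared device state between RTDM
+ * tasks. ctx, its lock member and update_registers() are hypothetical.
+ *
+ *	ret = rtdm_mutex_lock(&ctx->lock);
+ *	if (ret)
+ *		return ret;
+ *
+ *	update_registers(ctx);
+ *
+ *	rtdm_mutex_unlock(&ctx->lock);
+ */
+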
+/** @} */
+
+/** @} Synchronisation services */
+
+/**
+ * @ingroup rtdm_driver_interface
+ * @defgroup rtdm_irq Interrupt Management Services
+ * @{
+ */
+
+/**
+ * @brief Register an interrupt handler
+ *
+ * This function registers the provided handler with an IRQ line and enables
+ * the line.
+ *
+ * @param[in,out] irq_handle IRQ handle
+ * @param[in] irq_no Line number of the addressed IRQ
+ * @param[in] handler Interrupt handler
+ * @param[in] flags Registration flags, see @ref RTDM_IRQTYPE_xxx for details
+ * @param[in] device_name Device name to show up in real-time IRQ lists
+ * @param[in] arg Pointer to be passed to the interrupt handler on invocation
+ *
+ * @return 0 on success, otherwise:
+ *
+ * - -EINVAL is returned if an invalid parameter was passed.
+ *
+ * - -EBUSY is returned if the specified IRQ line is already in use.
+ *
+ * - -ENOSYS is returned if the real-time core is disabled.
+ *
+ * @coretags{secondary-only}
+ */
+int rtdm_irq_request(rtdm_irq_t *irq_handle, unsigned int irq_no,
+		     rtdm_irq_handler_t handler, unsigned long flags,
+		     const char *device_name, void *arg)
+{
+	return rtdm_irq_request_affine(irq_handle, irq_no, handler, flags,
+				       device_name, arg, NULL);
+}
+
+EXPORT_SYMBOL_GPL(rtdm_irq_request);
+
+/**
+ * @brief Register an interrupt handler
+ *
+ * This function registers the provided handler with an IRQ line and enables
+ * the line.
+ *
+ * @param[in,out] irq_handle IRQ handle
+ * @param[in] irq_no Line number of the addressed IRQ
+ * @param[in] handler Interrupt handler
+ * @param[in] flags Registration flags, see @ref RTDM_IRQTYPE_xxx for details
+ * @param[in] device_name Device name to show up in real-time IRQ lists
+ * @param[in] arg Pointer to be passed to the interrupt handler on invocation
+ * @param[in] cpumask CPU affinity of the interrupt
+ *
+ * @return 0 on success, otherwise:
+ *
+ * - -EINVAL is returned if an invalid parameter was passed.
+ *
+ * - -EBUSY is returned if the specified IRQ line is already in use.
+ *
+ * - -ENOSYS is returned if the real-time core is disabled.
+ *
+ * @coretags{secondary-only}
+ */
+int rtdm_irq_request_affine(rtdm_irq_t *irq_handle, unsigned int irq_no,
+			    rtdm_irq_handler_t handler, unsigned long flags,
+			    const char *device_name, void *arg,
+			    const cpumask_t *cpumask)
+{
+	int err;
+
+	if (!realtime_core_enabled())
+		return -ENOSYS;
+
+	if (!XENO_ASSERT(COBALT, xnsched_root_p()))
+		return -EPERM;
+
+	err = xnintr_init(irq_handle, device_name, irq_no, handler, NULL, flags);
+	if (err)
+		return err;
+
+	err = xnintr_attach(irq_handle, arg, cpumask);
+	if (err) {
+		xnintr_destroy(irq_handle);
+		return err;
+	}
+
+	xnintr_enable(irq_handle);
+
+	return 0;
+}
+
+EXPORT_SYMBOL_GPL(rtdm_irq_request_affine);
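+
+/*
+ * Sketch: hooking a device interrupt from the (secondary-mode) open
+ * handler. my_isr, struct my_ctx and ack_device_irq() are hypothetical;
+ * rtdm_irq_get_arg() retrieves the arg cookie passed at registration.
+ *
+ *	static int my_isr(rtdm_irq_t *irq_handle)
+ *	{
+ *		struct my_ctx *ctx = rtdm_irq_get_arg(irq_handle, struct my_ctx);
+ *
+ *		ack_device_irq(ctx);
+ *		rtdm_event_signal(&ctx->data_ready);
+ *		return RTDM_IRQ_HANDLED;
+ *	}
+ *
+ *	ret = rtdm_irq_request(&ctx->irq_handle, ctx->irq, my_isr,
+ *			       0, "mydrv", ctx);
+ */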
+
+#ifdef DOXYGEN_CPP /* Only used for doxygen doc generation */
+/**
+ * @brief Release an interrupt handler
+ *
+ * @param[in,out] irq_handle IRQ handle as returned by rtdm_irq_request()
+ *
+ * @return 0 on success, otherwise negative error code
+ *
+ * @note The caller is responsible for shutting down the IRQ source at device
+ * level before invoking this service. In turn, rtdm_irq_free ensures that any
+ * pending event on the given IRQ line is fully processed on return from this
+ * service.
+ *
+ * @coretags{secondary-only}
+ */
+int rtdm_irq_free(rtdm_irq_t *irq_handle);
+
+/**
+ * @brief Enable interrupt line
+ *
+ * @param[in,out] irq_handle IRQ handle as returned by rtdm_irq_request()
+ *
+ * @return 0 on success, otherwise negative error code
+ *
+ * @note This service is for exceptional use only. Drivers should
+ * always prefer interrupt masking at device level (via corresponding
+ * control registers etc.)  over masking at line level. Keep in mind
+ * that the latter is incompatible with IRQ line sharing and can also
+ * be more costly as interrupt controller access requires broader
+ * synchronization. Also, such service is solely available from
+ * secondary mode. The caller is responsible for excluding such
+ * conflicts.
+ *
+ * @coretags{secondary-only}
+ */
+int rtdm_irq_enable(rtdm_irq_t *irq_handle);
+
+/**
+ * @brief Disable interrupt line
+ *
+ * @param[in,out] irq_handle IRQ handle as returned by rtdm_irq_request()
+ *
+ * @return 0 on success, otherwise negative error code
+ *
+ * @note This service is for exceptional use only. Drivers should
+ * always prefer interrupt masking at device level (via corresponding
+ * control registers etc.)  over masking at line level. Keep in mind
+ * that the latter is incompatible with IRQ line sharing and can also
+ * be more costly as interrupt controller access requires broader
+ * synchronization.  Also, such service is solely available from
+ * secondary mode.  The caller is responsible for excluding such
+ * conflicts.
+ *
+ * @coretags{secondary-only}
+ */
+int rtdm_irq_disable(rtdm_irq_t *irq_handle);
+
+/**
+ * @brief Set interrupt affinity
+ *
+ * @param[in] irq_handle IRQ handle as returned by rtdm_irq_request()
+ *
+ * @param[in] cpumask The new CPU affinity of the interrupt
+ *
+ * @return 0 on success, otherwise negative error code
+ *
+ * @coretags{secondary-only}
+ */
+int rtdm_irq_set_affinity(rtdm_irq_t *irq_handle, const cpumask_t *cpumask);
+#endif /* DOXYGEN_CPP */
+
+/** @} Interrupt Management Services */
+
+/**
+ * @ingroup rtdm_driver_interface
+ * @defgroup rtdm_nrtsignal Non-Real-Time Signalling Services
+ *
+ * These services provide a mechanism to request the execution of a specified
+ * handler in non-real-time context. The triggering can safely be performed in
+ * real-time context without suffering from unknown delays. The handler
+ * execution will be deferred until the next time the real-time subsystem
+ * releases the CPU to the non-real-time part.
+ * @{
+ */
+
+#ifdef DOXYGEN_CPP /* Only used for doxygen doc generation */
+
+/**
+ * @brief Register a non-real-time signal handler
+ *
+ * @param[in,out] nrt_sig Signal handle
+ * @param[in] handler Non-real-time signal handler
+ * @param[in] arg Custom argument passed to @c handler() on each invocation
+ *
+ * @return 0 on success, otherwise:
+ *
+ * - -EAGAIN is returned if no free signal slot is available.
+ *
+ * @coretags{task-unrestricted}
+ */
+int rtdm_nrtsig_init(rtdm_nrtsig_t *nrt_sig, rtdm_nrtsig_handler_t handler,
+		     void *arg);
+
+/**
+ * @brief Release a non-realtime signal handler
+ *
+ * @param[in,out] nrt_sig Signal handle
+ *
+ * @coretags{task-unrestricted}
+ */
+void rtdm_nrtsig_destroy(rtdm_nrtsig_t *nrt_sig);
+#endif /* DOXYGEN_CPP */
+
+void __rtdm_nrtsig_execute(struct pipeline_inband_work *inband_work)
+{
+	struct rtdm_nrtsig *nrt_sig;
+
+	nrt_sig = container_of(inband_work, typeof(*nrt_sig), inband_work);
+	nrt_sig->handler(nrt_sig, nrt_sig->arg);
+}
+EXPORT_SYMBOL_GPL(__rtdm_nrtsig_execute);
+
+/**
+ * Trigger non-real-time signal
+ *
+ * @param[in,out] nrt_sig Signal handle
+ *
+ * @coretags{unrestricted}
+ */
+void rtdm_nrtsig_pend(rtdm_nrtsig_t *nrt_sig)
+{
+	pipeline_post_inband_work(nrt_sig);
+}
+EXPORT_SYMBOL_GPL(rtdm_nrtsig_pend);
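+
+/*
+ * Sketch: deferring an in-band wakeup from real-time context via a
+ * non-real-time signal. my_nrtsig_handler, struct my_ctx and its
+ * inband_waitq member are hypothetical.
+ *
+ *	static void my_nrtsig_handler(rtdm_nrtsig_t *nrt_sig, void *arg)
+ *	{
+ *		struct my_ctx *ctx = arg;
+ *
+ *		wake_up(&ctx->inband_waitq);
+ *	}
+ *
+ * At init time:
+ *
+ *	rtdm_nrtsig_init(&ctx->nrt_sig, my_nrtsig_handler, ctx);
+ *
+ * Later, from real-time context:
+ *
+ *	rtdm_nrtsig_pend(&ctx->nrt_sig);
+ */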
+
+static LIST_HEAD(nrt_work_list);
+DEFINE_PRIVATE_XNLOCK(nrt_work_lock);
+
+static void lostage_schedule_work(struct pipeline_inband_work *inband_work)
+{
+	struct work_struct *lostage_work;
+	spl_t s;
+
+	xnlock_get_irqsave(&nrt_work_lock, s);
+
+	while (!list_empty(&nrt_work_list)) {
+		lostage_work = list_first_entry(&nrt_work_list,
+						struct work_struct, entry);
+		list_del_init(&lostage_work->entry);
+
+		xnlock_put_irqrestore(&nrt_work_lock, s);
+
+		schedule_work(lostage_work);
+
+		xnlock_get_irqsave(&nrt_work_lock, s);
+	}
+
+	xnlock_put_irqrestore(&nrt_work_lock, s);
+}
+
+static struct lostage_trigger_work {
+	struct pipeline_inband_work inband_work; /* Must be first. */
+} nrt_work =  {
+	.inband_work = PIPELINE_INBAND_WORK_INITIALIZER(nrt_work,
+							lostage_schedule_work),
+};
+
+/**
+ * Schedule a work item on the Linux non-real-time global workqueue
+ * from primary mode.
+ *
+ * @param lostage_work Work item to be queued for in-band execution.
+ */
+void rtdm_schedule_nrt_work(struct work_struct *lostage_work)
+{
+	spl_t s;
+
+	if (is_secondary_domain()) {
+		schedule_work(lostage_work);
+		return;
+	}
+
+	xnlock_get_irqsave(&nrt_work_lock, s);
+
+	list_add_tail(&lostage_work->entry, &nrt_work_list);
+	pipeline_post_inband_work(&nrt_work);
+
+	xnlock_put_irqrestore(&nrt_work_lock, s);
+}
+EXPORT_SYMBOL_GPL(rtdm_schedule_nrt_work);
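+
+/*
+ * Usage sketch (illustrative only, not part of this file): queuing a
+ * regular work_struct from primary mode. foo_work_handler is a
+ * hypothetical routine executing in the global Linux workqueue.
+ *
+ *	static void foo_work_handler(struct work_struct *work)
+ *	{
+ *		... runs in regular Linux (in-band) context ...
+ *	}
+ *	static DECLARE_WORK(foo_work, foo_work_handler);
+ *
+ *	From any context, including primary mode:
+ *		rtdm_schedule_nrt_work(&foo_work);
+ */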
+
+/** @} Non-Real-Time Signalling Services */
+
+
+/**
+ * @ingroup rtdm_driver_interface
+ * @defgroup rtdm_util Utility Services
+ * @{
+ */
+
+struct mmap_tramp_data {
+	struct rtdm_fd *fd;
+	struct file_operations *fops;
+	int (*mmap_handler)(struct rtdm_fd *fd,
+			    struct vm_area_struct *vma);
+};
+
+struct mmap_helper_data {
+	void *src_vaddr;
+	phys_addr_t src_paddr;
+	struct vm_operations_struct *vm_ops;
+	void *vm_private_data;
+	struct mmap_tramp_data tramp_data;
+};
+
+static int mmap_kmem_helper(struct vm_area_struct *vma, void *va)
+{
+	unsigned long addr, len, pfn, to;
+	int ret = 0;
+
+	to = (unsigned long)va;
+	addr = vma->vm_start;
+	len = vma->vm_end - vma->vm_start;
+
+	if (to != PAGE_ALIGN(to) || (len & ~PAGE_MASK) != 0)
+		return -EINVAL;
+
+#ifndef CONFIG_MMU
+	pfn = __pa(to) >> PAGE_SHIFT;
+	ret = remap_pfn_range(vma, addr, pfn, len, PAGE_SHARED);
+#else
+	if (to < VMALLOC_START || to >= VMALLOC_END) {
+		/* logical address. */
+		pfn = __pa(to) >> PAGE_SHIFT;
+		ret = remap_pfn_range(vma, addr, pfn, len, PAGE_SHARED);
+		if (ret)
+			return ret;
+	} else {
+		/* vmalloc memory. */
+		while (len > 0) {
+			struct page *page = vmalloc_to_page((void *)to);
+			if (vm_insert_page(vma, addr, page))
+				return -EAGAIN;
+			addr += PAGE_SIZE;
+			to += PAGE_SIZE;
+			len -= PAGE_SIZE;
+		}
+	}
+
+	if (cobalt_machine.prefault)
+		cobalt_machine.prefault(vma);
+#endif
+
+	return ret;
+}
+
+static int mmap_iomem_helper(struct vm_area_struct *vma, phys_addr_t pa)
+{
+	pgprot_t prot = PAGE_SHARED;
+	unsigned long len;
+
+	len = vma->vm_end - vma->vm_start;
+#ifndef CONFIG_MMU
+	vma->vm_pgoff = pa >> PAGE_SHIFT;
+#endif /* CONFIG_MMU */
+
+#ifdef __HAVE_PHYS_MEM_ACCESS_PROT
+	if (vma->vm_file)
+		prot = phys_mem_access_prot(vma->vm_file, pa >> PAGE_SHIFT,
+					    len, prot);
+#endif
+	vma->vm_page_prot = pgprot_noncached(prot);
+
+	return remap_pfn_range(vma, vma->vm_start, pa >> PAGE_SHIFT,
+			       len, vma->vm_page_prot);
+}
+
+static int mmap_buffer_helper(struct rtdm_fd *fd, struct vm_area_struct *vma)
+{
+	struct mmap_tramp_data *tramp_data = vma->vm_private_data;
+	struct mmap_helper_data *helper_data;
+	int ret;
+
+	helper_data = container_of(tramp_data, struct mmap_helper_data, tramp_data);
+	vma->vm_ops = helper_data->vm_ops;
+	vma->vm_private_data = helper_data->vm_private_data;
+
+	if (helper_data->src_paddr)
+		ret = mmap_iomem_helper(vma, helper_data->src_paddr);
+	else
+		ret = mmap_kmem_helper(vma, helper_data->src_vaddr);
+
+	return ret;
+}
+
+static int mmap_trampoline(struct file *filp, struct vm_area_struct *vma)
+{
+	struct mmap_tramp_data *tramp_data = filp->private_data;
+	int ret;
+
+	vma->vm_private_data = tramp_data;
+
+	ret = tramp_data->mmap_handler(tramp_data->fd, vma);
+	if (ret)
+		return ret;
+
+	return 0;
+}
+
+#ifndef CONFIG_MMU
+
+static unsigned long
+internal_get_unmapped_area(struct file *filp,
+			   unsigned long addr, unsigned long len,
+			   unsigned long pgoff, unsigned long flags)
+{
+	struct mmap_tramp_data *tramp_data = filp->private_data;
+	struct mmap_helper_data *helper_data;
+	unsigned long pa;
+
+	helper_data = container_of(tramp_data, struct mmap_helper_data, tramp_data);
+	pa = helper_data->src_paddr;
+	if (pa)
+		return (unsigned long)__va(pa);
+
+	return (unsigned long)helper_data->src_vaddr;
+}
+
+static int do_rtdm_mmap(struct mmap_tramp_data *tramp_data,
+			size_t len, off_t offset, int prot, int flags,
+			void **pptr)
+{
+	const struct file_operations *old_fops;
+	unsigned long u_addr;
+	struct file *filp;
+
+	filp = filp_open("/dev/mem", O_RDWR, 0);
+	if (IS_ERR(filp))
+		return PTR_ERR(filp);
+
+	old_fops = filp->f_op;
+	filp->f_op = tramp_data->fops;
+	filp->private_data = tramp_data;
+	u_addr = vm_mmap(filp, (unsigned long)*pptr, len, prot, flags, offset);
+	filp_close(filp, current->files);
+	filp->f_op = old_fops;
+
+	if (IS_ERR_VALUE(u_addr))
+		return (int)u_addr;
+
+	*pptr = (void *)u_addr;
+
+	return 0;
+}
+
+#else /* CONFIG_MMU */
+
+static int do_rtdm_mmap(struct mmap_tramp_data *tramp_data,
+			size_t len, off_t offset, int prot, int flags,
+			void **pptr)
+{
+	unsigned long u_addr;
+	struct file *filp;
+
+	filp = anon_inode_getfile("[rtdm]", tramp_data->fops, tramp_data, O_RDWR);
+	if (IS_ERR(filp))
+		return PTR_ERR(filp);
+
+	u_addr = vm_mmap(filp, (unsigned long)*pptr, len, prot, flags, offset);
+	filp_close(filp, current->files);
+
+	if (IS_ERR_VALUE(u_addr))
+		return (int)u_addr;
+
+	*pptr = (void *)u_addr;
+
+	return 0;
+}
+
+#define internal_get_unmapped_area  NULL
+
+#endif /* CONFIG_MMU */
+
+static struct file_operations internal_mmap_fops = {
+	.mmap = mmap_trampoline,
+	.get_unmapped_area = internal_get_unmapped_area
+};
+
+static unsigned long
+driver_get_unmapped_area(struct file *filp,
+			 unsigned long addr, unsigned long len,
+			 unsigned long pgoff, unsigned long flags)
+{
+	struct mmap_tramp_data *tramp_data = filp->private_data;
+	struct rtdm_fd *fd = tramp_data->fd;
+
+	if (fd->ops->get_unmapped_area)
+		return fd->ops->get_unmapped_area(fd, len, pgoff, flags);
+
+#ifdef CONFIG_MMU
+	/* Run default handler. */
+	return current->mm->get_unmapped_area(filp, addr, len, pgoff, flags);
+#else
+	return -ENODEV;
+#endif
+}
+
+static struct file_operations driver_mmap_fops = {
+	.mmap = mmap_trampoline,
+	.get_unmapped_area = driver_get_unmapped_area
+};
+
+int __rtdm_mmap_from_fdop(struct rtdm_fd *fd, size_t len, off_t offset,
+			  int prot, int flags, void **pptr)
+{
+	struct mmap_tramp_data tramp_data = {
+		.fd = fd,
+		.fops = &driver_mmap_fops,
+		.mmap_handler = fd->ops->mmap,
+	};
+
+#ifndef CONFIG_MMU
+	/*
+	 * XXX: A .get_unmapped_area handler must be provided in the
+	 * nommu case. We use it to keep the memory management code
+	 * from sharing VM regions across distinct mapping requests,
+	 * which it would otherwise do since all requests currently
+	 * apply to the same file (i.e. /dev/mem, see do_mmap_pgoff()
+	 * in the nommu case).
+	 */
+	if (fd->ops->get_unmapped_area)
+		offset = fd->ops->get_unmapped_area(fd, len, 0, flags);
+#endif
+
+	return do_rtdm_mmap(&tramp_data, len, offset, prot, flags, pptr);
+}
+
+/**
+ * Map a kernel memory range into the address space of the user.
+ *
+ * @param[in] fd RTDM file descriptor as passed to the invoked
+ * device operation handler
+ * @param[in] src_addr Kernel virtual address to be mapped
+ * @param[in] len Length of the memory range
+ * @param[in] prot Protection flags for the user's memory range, typically
+ * either PROT_READ or PROT_READ|PROT_WRITE
+ * @param[in,out] pptr Address of a pointer containing the desired user
+ * address or NULL on entry and the finally assigned address on return
+ * @param[in] vm_ops vm_operations to be executed on the vm_area of the
+ * user memory range or NULL
+ * @param[in] vm_private_data Private data to be stored in the vm_area,
+ * primarily useful for vm_operation handlers
+ *
+ * @return 0 on success, otherwise (most common values):
+ *
+ * - -EINVAL is returned if an invalid start address, size, or destination
+ * address was passed.
+ *
+ * - -ENOMEM is returned if there is insufficient free memory or the limit of
+ * memory mapping for the user process was reached.
+ *
+ * - -EAGAIN is returned if too much memory has been already locked by the
+ * user process.
+ *
+ * - -EPERM @e may be returned if an illegal invocation environment is
+ * detected.
+ *
+ * @note This service only works on memory regions allocated via kmalloc() or
+ * vmalloc(). To map physical I/O memory to user-space use
+ * rtdm_iomap_to_user() instead.
+ *
+ * @note RTDM supports two models for unmapping the memory area:
+ * - manual unmapping via rtdm_munmap(), which may be issued from a
+ * driver in response to an IOCTL call, or through a regular munmap()
+ * call from the application.
+ * - automatic unmapping, triggered by the termination of the process
+ *   which owns the mapping.
+ * To track the number of references pending on the resource mapped,
+ * the driver can pass the address of a close handler for the vm_area
+ * considered, in the @a vm_ops descriptor. See the relevant Linux
+ * kernel programming documentation (e.g. Linux Device Drivers book)
+ * on virtual memory management for details.
+ *
+ * @coretags{secondary-only}
+ */
+int rtdm_mmap_to_user(struct rtdm_fd *fd,
+		      void *src_addr, size_t len,
+		      int prot, void **pptr,
+		      struct vm_operations_struct *vm_ops,
+		      void *vm_private_data)
+{
+	struct mmap_helper_data helper_data = {
+		.tramp_data = {
+			.fd = fd,
+			.fops = &internal_mmap_fops,
+			.mmap_handler = mmap_buffer_helper,
+		},
+		.src_vaddr = src_addr,
+		.src_paddr = 0,
+		.vm_ops = vm_ops,
+		.vm_private_data = vm_private_data
+	};
+
+	if (!XENO_ASSERT(COBALT, xnsched_root_p()))
+		return -EPERM;
+
+	return do_rtdm_mmap(&helper_data.tramp_data, len, 0, prot, MAP_SHARED, pptr);
+}
+EXPORT_SYMBOL_GPL(rtdm_mmap_to_user);
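+
+/*
+ * Usage sketch (illustrative only, not part of this file): exporting
+ * a kmalloc'ed buffer to user space from an IOCTL handler running in
+ * secondary mode. foo_buf is assumed to be page-aligned and foo_len a
+ * multiple of PAGE_SIZE; both names are hypothetical.
+ *
+ *	void *uptr = NULL;
+ *	int ret;
+ *
+ *	ret = rtdm_mmap_to_user(fd, foo_buf, foo_len,
+ *				PROT_READ|PROT_WRITE, &uptr, NULL, NULL);
+ *	if (ret)
+ *		return ret;
+ *
+ *	The mapped address in uptr can then be handed back to the
+ *	application, e.g. through the IOCTL argument.
+ */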
+
+/**
+ * Map an I/O memory range into the address space of the user.
+ *
+ * @param[in] fd RTDM file descriptor as passed to the invoked
+ * device operation handler
+ * @param[in] src_addr Physical I/O address to be mapped
+ * @param[in] len Length of the memory range
+ * @param[in] prot Protection flags for the user's memory range, typically
+ * either PROT_READ or PROT_READ|PROT_WRITE
+ * @param[in,out] pptr Address of a pointer containing the desired user
+ * address or NULL on entry and the finally assigned address on return
+ * @param[in] vm_ops vm_operations to be executed on the vm_area of the
+ * user memory range or NULL
+ * @param[in] vm_private_data Private data to be stored in the vm_area,
+ * primarily useful for vm_operation handlers
+ *
+ * @return 0 on success, otherwise (most common values):
+ *
+ * - -EINVAL is returned if an invalid start address, size, or destination
+ * address was passed.
+ *
+ * - -ENOMEM is returned if there is insufficient free memory or the limit of
+ * memory mapping for the user process was reached.
+ *
+ * - -EAGAIN is returned if too much memory has been already locked by the
+ * user process.
+ *
+ * - -EPERM @e may be returned if an illegal invocation environment is
+ * detected.
+ *
+ * @note RTDM supports two models for unmapping the memory area:
+ * - manual unmapping via rtdm_munmap(), which may be issued from a
+ * driver in response to an IOCTL call, or through a regular munmap()
+ * call from the application.
+ * - automatic unmapping, triggered by the termination of the process
+ *   which owns the mapping.
+ * To track the number of references pending on the resource mapped,
+ * the driver can pass the address of a close handler for the vm_area
+ * considered, in the @a vm_ops descriptor. See the relevant Linux
+ * kernel programming documentation (e.g. Linux Device Drivers book)
+ * on virtual memory management for details.
+ *
+ * @coretags{secondary-only}
+ */
+int rtdm_iomap_to_user(struct rtdm_fd *fd,
+		       phys_addr_t src_addr, size_t len,
+		       int prot, void **pptr,
+		       struct vm_operations_struct *vm_ops,
+		       void *vm_private_data)
+{
+	struct mmap_helper_data helper_data = {
+		.tramp_data = {
+			.fd = fd,
+			.fops = &internal_mmap_fops,
+			.mmap_handler = mmap_buffer_helper,
+		},
+		.src_vaddr = NULL,
+		.src_paddr = src_addr,
+		.vm_ops = vm_ops,
+		.vm_private_data = vm_private_data
+	};
+
+	if (!XENO_ASSERT(COBALT, xnsched_root_p()))
+		return -EPERM;
+
+	return do_rtdm_mmap(&helper_data.tramp_data, len, 0, prot, MAP_SHARED, pptr);
+}
+EXPORT_SYMBOL_GPL(rtdm_iomap_to_user);
+
+/**
+ * Map a kernel logical memory range to a virtual user area.
+ *
+ * This routine is commonly used from a ->mmap() handler of a RTDM
+ * driver, for mapping a virtual memory area with a direct physical
+ * mapping over the user address space referred to by @a vma.
+ *
+ * @param[in] vma The VMA descriptor to receive the mapping.
+ * @param[in] va The kernel logical address to be mapped.
+ *
+ * @return 0 on success, otherwise a negated error code is returned.
+ *
+ * @note This service works on memory regions allocated via
+ * kmalloc(). To map a chunk of virtual space with no direct physical
+ * mapping, or physical I/O memory to a VMA, call rtdm_mmap_vmem()
+ * or rtdm_mmap_iomem() respectively instead.
+ *
+ * @coretags{secondary-only}
+ */
+int rtdm_mmap_kmem(struct vm_area_struct *vma, void *va)
+{
+	return mmap_kmem_helper(vma, va);
+}
+EXPORT_SYMBOL_GPL(rtdm_mmap_kmem);
+
+/**
+ * Map a virtual memory range to a virtual user area.
+ *
+ * This routine is commonly used from a ->mmap() handler of a RTDM
+ * driver, for mapping a purely virtual memory area over the user
+ * address space referred to by @a vma.
+ *
+ * @param[in] vma The VMA descriptor to receive the mapping.
+ * @param[in] va The virtual address to be mapped.
+ *
+ * @return 0 on success, otherwise a negated error code is returned.
+ *
+ * @note This service works on memory regions allocated via
+ * vmalloc(). To map a chunk of logical space obtained from kmalloc(),
+ * or physical I/O memory to a VMA, call rtdm_mmap_kmem() or
+ * rtdm_mmap_iomem() respectively instead.
+ *
+ * @coretags{secondary-only}
+ */
+int rtdm_mmap_vmem(struct vm_area_struct *vma, void *va)
+{
+	/*
+	 * Our helper handles both directly mapped (physical) and
+	 * purely virtual memory ranges.
+	 */
+	return mmap_kmem_helper(vma, va);
+}
+EXPORT_SYMBOL_GPL(rtdm_mmap_vmem);
+
+/**
+ * Map an I/O memory range to a virtual user area.
+ *
+ * This routine is commonly used from a ->mmap() handler of a RTDM
+ * driver, for mapping an I/O memory area over the user address space
+ * referred to by @a vma.
+ *
+ * @param[in] vma The VMA descriptor to receive the mapping.
+ * @param[in] pa The physical I/O address to be mapped.
+ *
+ * @return 0 on success, otherwise a negated error code is returned.
+ *
+ * @note To map a chunk of logical space obtained from kmalloc(), or a
+ * purely virtual area with no direct physical mapping to a VMA, call
+ * rtdm_mmap_kmem() or rtdm_mmap_vmem() respectively instead.
+ *
+ * @coretags{secondary-only}
+ */
+int rtdm_mmap_iomem(struct vm_area_struct *vma, phys_addr_t pa)
+{
+	return mmap_iomem_helper(vma, pa);
+}
+EXPORT_SYMBOL_GPL(rtdm_mmap_iomem);
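+
+/*
+ * Usage sketch (illustrative only, not part of this file): a driver's
+ * ->mmap() handler picking the proper helper depending on the backing
+ * memory. struct foo_context and its fields are hypothetical.
+ *
+ *	static int foo_mmap(struct rtdm_fd *fd, struct vm_area_struct *vma)
+ *	{
+ *		struct foo_context *ctx = rtdm_fd_to_private(fd);
+ *
+ *		if (ctx->io_base)
+ *			return rtdm_mmap_iomem(vma, ctx->io_base);
+ *
+ *		return rtdm_mmap_kmem(vma, ctx->dma_buf);
+ *	}
+ */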
+
+/**
+ * Unmap a user memory range.
+ *
+ * @param[in] ptr User address of the memory range
+ * @param[in] len Length of the memory range
+ *
+ * @return 0 on success, otherwise:
+ *
+ * - -EINVAL is returned if an invalid address or size was passed.
+ *
+ * - -EPERM @e may be returned if an illegal invocation environment is
+ * detected.
+ *
+ * @coretags{secondary-only}
+ */
+int rtdm_munmap(void *ptr, size_t len)
+{
+	if (!XENO_ASSERT(COBALT, xnsched_root_p()))
+		return -EPERM;
+
+	return vm_munmap((unsigned long)ptr, len);
+}
+EXPORT_SYMBOL_GPL(rtdm_munmap);
+
+int rtdm_get_iovec(struct rtdm_fd *fd, struct iovec **iovp,
+		   const struct user_msghdr *msg,
+		   struct iovec *iov_fast)
+{
+	size_t len = sizeof(struct iovec) * msg->msg_iovlen;
+	struct iovec *iov = iov_fast;
+
+	/*
+	 * If the I/O vector doesn't fit in the fast memory, allocate
+	 * a chunk from the system heap which is large enough to hold
+	 * it.
+	 */
+	if (msg->msg_iovlen > RTDM_IOV_FASTMAX) {
+		iov = xnmalloc(len);
+		if (iov == NULL)
+			return -ENOMEM;
+	}
+
+	*iovp = iov;
+
+	if (!rtdm_fd_is_user(fd)) {
+		memcpy(iov, msg->msg_iov, len);
+		return 0;
+	}
+
+#ifdef CONFIG_XENO_ARCH_SYS3264
+	if (rtdm_fd_is_compat(fd))
+		return sys32_get_iovec(iov,
+			       (struct compat_iovec __user *)msg->msg_iov,
+			       msg->msg_iovlen);
+#endif
+
+	return rtdm_copy_from_user(fd, iov, msg->msg_iov, len);
+}
+EXPORT_SYMBOL_GPL(rtdm_get_iovec);
+
+int rtdm_put_iovec(struct rtdm_fd *fd, struct iovec *iov,
+		   const struct user_msghdr *msg,
+		   struct iovec *iov_fast)
+{
+	size_t len = sizeof(iov[0]) * msg->msg_iovlen;
+	int ret;
+
+	if (!rtdm_fd_is_user(fd)) {
+		memcpy(msg->msg_iov, iov, len);
+		ret = 0;
+	} else
+#ifdef CONFIG_XENO_ARCH_SYS3264
+		if (rtdm_fd_is_compat(fd))
+			ret = sys32_put_iovec((struct compat_iovec __user *)msg->msg_iov,
+					      iov, msg->msg_iovlen);
+		else
+#endif
+			ret = rtdm_copy_to_user(fd, msg->msg_iov, iov, len);
+
+	if (iov != iov_fast)
+		xnfree(iov);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(rtdm_put_iovec);
+
+ssize_t rtdm_get_iov_flatlen(struct iovec *iov, int iovlen)
+{
+	ssize_t len;
+	int nvec;
+
+	/* Return the flattened vector length. */
+	for (len = 0, nvec = 0; nvec < iovlen; nvec++) {
+		ssize_t l = iov[nvec].iov_len;
+		if (l < 0 || len + l < len) /* SuS wants this. */
+			return -EINVAL;
+		len += l;
+	}
+
+	return len;
+}
+EXPORT_SYMBOL_GPL(rtdm_get_iov_flatlen);
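+
+/*
+ * Usage sketch (illustrative only, not part of this file): a protocol
+ * driver's sendmsg handler gathering the user I/O vector before
+ * transmission. foo_sendmsg is a hypothetical handler.
+ *
+ *	static ssize_t foo_sendmsg(struct rtdm_fd *fd,
+ *				   const struct user_msghdr *msg, int flags)
+ *	{
+ *		struct iovec iov_fast[RTDM_IOV_FASTMAX], *iov;
+ *		ssize_t len;
+ *		int ret;
+ *
+ *		ret = rtdm_get_iovec(fd, &iov, msg, iov_fast);
+ *		if (ret)
+ *			return ret;
+ *
+ *		len = rtdm_get_iov_flatlen(iov, msg->msg_iovlen);
+ *		if (len > 0) {
+ *			... transmit the data described by iov[] ...
+ *		}
+ *
+ *		ret = rtdm_put_iovec(fd, iov, msg, iov_fast);
+ *
+ *		return len < 0 ? len : (ret ? ret : len);
+ *	}
+ */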
+
+#ifdef DOXYGEN_CPP /* Only used for doxygen doc generation */
+
+/**
+ * Real-time safe rate-limited message printing on kernel console
+ *
+ * @param[in] format Format string (conforming standard @c printf())
+ * @param ... Arguments referred to by @a format
+ *
+ * @return On success, this service returns the number of characters printed.
+ * Otherwise, a negative error code is returned.
+ *
+ * @coretags{unrestricted}
+ */
+void rtdm_printk_ratelimited(const char *format, ...);
+
+/**
+ * Real-time safe message printing on kernel console
+ *
+ * @param[in] format Format string (conforming standard @c printf())
+ * @param ... Arguments referred to by @a format
+ *
+ * @return On success, this service returns the number of characters printed.
+ * Otherwise, a negative error code is returned.
+ *
+ * @coretags{unrestricted}
+ */
+void rtdm_printk(const char *format, ...);
+
+/**
+ * Allocate memory block
+ *
+ * @param[in] size Requested size of the memory block
+ *
+ * @return The pointer to the allocated block is returned on success, NULL
+ * otherwise.
+ *
+ * @coretags{unrestricted}
+ */
+void *rtdm_malloc(size_t size);
+
+/**
+ * Release real-time memory block
+ *
+ * @param[in] ptr Pointer to memory block as returned by rtdm_malloc()
+ *
+ * @coretags{unrestricted}
+ */
+void rtdm_free(void *ptr);
+
+/**
+ * Check if read access to user-space memory block is safe
+ *
+ * @param[in] fd RTDM file descriptor as passed to the invoked
+ * device operation handler
+ * @param[in] ptr Address of the user-provided memory block
+ * @param[in] size Size of the memory block
+ *
+ * @return Non-zero is returned when it is safe to read from the specified
+ * memory block, 0 otherwise.
+ *
+ * @coretags{task-unrestricted}
+ */
+int rtdm_read_user_ok(struct rtdm_fd *fd, const void __user *ptr,
+		      size_t size);
+
+/**
+ * Check if read/write access to user-space memory block is safe
+ *
+ * @param[in] fd RTDM file descriptor as passed to the invoked
+ * device operation handler
+ * @param[in] ptr Address of the user-provided memory block
+ * @param[in] size Size of the memory block
+ *
+ * @return Non-zero is returned when it is safe to read from or write to the
+ * specified memory block, 0 otherwise.
+ *
+ * @coretags{task-unrestricted}
+ */
+int rtdm_rw_user_ok(struct rtdm_fd *fd, const void __user *ptr,
+		    size_t size);
+
+/**
+ * Copy user-space memory block to specified buffer
+ *
+ * @param[in] fd RTDM file descriptor as passed to the invoked
+ * device operation handler
+ * @param[in] dst Destination buffer address
+ * @param[in] src Address of the user-space memory block
+ * @param[in] size Size of the memory block
+ *
+ * @return 0 on success, otherwise:
+ *
+ * - -EFAULT is returned if an invalid memory area was accessed.
+ *
+ * @note Before invoking this service, verify via rtdm_read_user_ok() that the
+ * provided user-space address can securely be accessed.
+ *
+ * @coretags{task-unrestricted}
+ */
+int rtdm_copy_from_user(struct rtdm_fd *fd, void *dst,
+			const void __user *src, size_t size);
+
+/**
+ * Check if read access to user-space memory block and copy it to specified
+ * buffer
+ *
+ * @param[in] fd RTDM file descriptor as passed to the invoked
+ * device operation handler
+ * @param[in] dst Destination buffer address
+ * @param[in] src Address of the user-space memory block
+ * @param[in] size Size of the memory block
+ *
+ * @return 0 on success, otherwise:
+ *
+ * - -EFAULT is returned if an invalid memory area was accessed.
+ *
+ * @note This service is a combination of rtdm_read_user_ok() and
+ * rtdm_copy_from_user().
+ *
+ * @coretags{task-unrestricted}
+ */
+int rtdm_safe_copy_from_user(struct rtdm_fd *fd, void *dst,
+			     const void __user *src, size_t size);
+
+/**
+ * Copy specified buffer to user-space memory block
+ *
+ * @param[in] fd RTDM file descriptor as passed to the invoked
+ * device operation handler
+ * @param[in] dst Address of the user-space memory block
+ * @param[in] src Source buffer address
+ * @param[in] size Size of the memory block
+ *
+ * @return 0 on success, otherwise:
+ *
+ * - -EFAULT is returned if an invalid memory area was accessed.
+ *
+ * @note Before invoking this service, verify via rtdm_rw_user_ok() that the
+ * provided user-space address can securely be accessed.
+ *
+ * @coretags{task-unrestricted}
+ */
+int rtdm_copy_to_user(struct rtdm_fd *fd, void __user *dst,
+		      const void *src, size_t size);
+
+/**
+ * Check if read/write access to user-space memory block is safe and copy
+ * specified buffer to it
+ *
+ * @param[in] fd RTDM file descriptor as passed to the invoked
+ * device operation handler
+ * @param[in] dst Address of the user-space memory block
+ * @param[in] src Source buffer address
+ * @param[in] size Size of the memory block
+ *
+ * @return 0 on success, otherwise:
+ *
+ * - -EFAULT is returned if an invalid memory area was accessed.
+ *
+ * @note This service is a combination of rtdm_rw_user_ok() and
+ * rtdm_copy_to_user().
+ *
+ * @coretags{task-unrestricted}
+ */
+int rtdm_safe_copy_to_user(struct rtdm_fd *fd, void __user *dst,
+			   const void *src, size_t size);
+
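+/*
+ * Usage sketch (illustrative only, not part of this file): copying an
+ * IOCTL argument structure from and back to user space. struct
+ * foo_config and the arg pointer are hypothetical.
+ *
+ *	struct foo_config cfg;
+ *	int ret;
+ *
+ *	ret = rtdm_safe_copy_from_user(fd, &cfg, arg, sizeof(cfg));
+ *	if (ret)
+ *		return ret;
+ *
+ *	... validate and apply cfg ...
+ *
+ *	return rtdm_safe_copy_to_user(fd, arg, &cfg, sizeof(cfg));
+ */
+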
+/**
+ * Copy user-space string to specified buffer
+ *
+ * @param[in] fd RTDM file descriptor as passed to the invoked
+ * device operation handler
+ * @param[in] dst Destination buffer address
+ * @param[in] src Address of the user-space string
+ * @param[in] count Maximum number of bytes to copy, including the trailing
+ * '0'
+ *
+ * @return Length of the string on success (not including the trailing '0'),
+ * otherwise:
+ *
+ * - -EFAULT is returned if an invalid memory area was accessed.
+ *
+ * @note This service already includes a check of the source address;
+ * calling rtdm_read_user_ok() for @a src explicitly is not required.
+ *
+ * @coretags{task-unrestricted}
+ */
+int rtdm_strncpy_from_user(struct rtdm_fd *fd, char *dst,
+			   const char __user *src, size_t count);
+
+/**
+ * Test if running in a real-time task
+ *
+ * @return Non-zero is returned if the caller resides in real-time context, 0
+ * otherwise.
+ *
+ * @coretags{unrestricted}
+ */
+int rtdm_in_rt_context(void);
+
+/**
+ * Test if the caller is capable of running in real-time context
+ *
+ * @param[in] fd RTDM file descriptor as passed to the invoked
+ * device operation handler
+ *
+ * @return Non-zero is returned if the caller is able to execute in real-time
+ * context (independent of its current execution mode), 0 otherwise.
+ *
+ * @note This function can be used by drivers that provide different
+ * implementations for the same service depending on the execution mode of
+ * the caller. If a caller requests such a service in non-real-time context
+ * but is capable of running in real-time as well, it might be appropriate
+ * for the driver to reject the request via -ENOSYS so that RTDM can switch
+ * the caller and restart the request in real-time context.
+ *
+ * @coretags{unrestricted}
+ */
+int rtdm_rt_capable(struct rtdm_fd *fd);
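+
+/*
+ * Usage sketch (illustrative only, not part of this file): the retry
+ * pattern described above, from a driver handler that prefers its
+ * real-time implementation whenever the caller can run there.
+ *
+ *	if (!rtdm_in_rt_context() && rtdm_rt_capable(fd))
+ *		return -ENOSYS;	(RTDM switches the caller and retries)
+ *
+ *	... non-real-time path follows ...
+ */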
+
+/**
+ * Test if the real-time core is available
+ *
+ * @return True if the real-time core is available, false if it is disabled
+ * or in an error state.
+ *
+ * @note Drivers should query the core state during initialization if they
+ * perform hardware setup operations or interact with RTDM services such as
+ * locks prior to calling an RTDM service that has a built-in state check of
+ * the real-time core (e.g. rtdm_dev_register() or rtdm_task_init()).
+ *
+ * @coretags{unrestricted}
+ */
+bool rtdm_available(void);
+
+#endif /* DOXYGEN_CPP */
+
+/** @} Utility Services */
+++ linux-patched/kernel/xenomai/rtdm/Makefile	2022-03-21 12:58:29.104891741 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/kernel/xenomai/rtdm/device.c	1970-01-01 01:00:00.000000000 +0100
+
+obj-$(CONFIG_XENOMAI) += xenomai.o
+
+xenomai-y :=	core.o		\
+		device.o	\
+		drvlib.o	\
+		fd.o		\
+		wrappers.o
+
+ccflags-y += -I$(srctree)/$(src)/.. -I$(srctree)/kernel
+++ linux-patched/kernel/xenomai/rtdm/device.c	2022-03-21 12:58:29.101891770 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/kernel/xenomai/rtdm/wrappers.c	1970-01-01 01:00:00.000000000 +0100
+/*
+ * Real-Time Driver Model for Xenomai, device management
+ *
+ * Copyright (C) 2005 Jan Kiszka <jan.kiszka@web.de>
+ * Copyright (C) 2005 Joerg Langenberg <joerg.langenberg@gmx.net>
+ * Copyright (C) 2014 Philippe Gerum <rpm@xenomai.org>
+ *
+ * Xenomai is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * Xenomai is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Xenomai; if not, write to the Free Software Foundation,
+ * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+#include <linux/module.h>
+#include <linux/delay.h>
+#include <linux/mutex.h>
+#include <linux/slab.h>
+#include <linux/device.h>
+#include <linux/notifier.h>
+#include "rtdm/internal.h"
+#include <cobalt/kernel/init.h>
+#include <trace/events/cobalt-rtdm.h>
+
+/**
+ * @ingroup rtdm
+ * @defgroup rtdm_profiles Device Profiles
+ *
+ * Pre-defined classes of real-time devices
+ *
+ * Device profiles define which operation handlers a driver of a
+ * certain class of devices has to implement, which name or protocol
+ * it has to register, which IOCTLs it has to provide, and further
+ * details. Sub-classes can be defined in order to extend a device
+ * profile with more hardware-specific functions.
+ */
+
+/**
+ * @addtogroup rtdm_driver_interface
+ * @{
+ */
+
+#define RTDM_DEVICE_MAGIC	0x82846877
+
+static struct rb_root protocol_devices;
+
+static DEFINE_MUTEX(register_lock);
+static DECLARE_BITMAP(protocol_devices_minor_map, RTDM_MAX_MINOR);
+
+static struct class *rtdm_class;
+
+static int enosys(void)
+{
+	return -ENOSYS;
+}
+
+void __rtdm_put_device(struct rtdm_device *dev)
+{
+	secondary_mode_only();
+
+	if (atomic_dec_and_test(&dev->refcount))
+		wake_up(&dev->putwq);
+}
+
+static inline xnkey_t get_proto_id(int pf, int type)
+{
+	xnkey_t llpf = (unsigned int)pf;
+	return (llpf << 32) | (unsigned int)type;
+}
+
+struct rtdm_device *__rtdm_get_namedev(const char *path)
+{
+	struct rtdm_device *dev;
+	xnhandle_t handle;
+	int ret;
+
+	secondary_mode_only();
+
+	/* skip common /dev prefix */
+	if (strncmp(path, "/dev/", 5) == 0)
+		path += 5;
+
+	/* skip RTDM devnode root */
+	if (strncmp(path, "rtdm/", 5) == 0)
+		path += 5;
+
+	ret = xnregistry_bind(path, XN_NONBLOCK, XN_RELATIVE, &handle);
+	if (ret)
+		return NULL;
+
+	mutex_lock(&register_lock);
+
+	dev = xnregistry_lookup(handle, NULL);
+	if (dev && dev->magic == RTDM_DEVICE_MAGIC)
+		__rtdm_get_device(dev);
+	else
+		dev = NULL;
+
+	mutex_unlock(&register_lock);
+
+	return dev;
+}
+
+struct rtdm_device *__rtdm_get_protodev(int protocol_family, int socket_type)
+{
+	struct rtdm_device *dev = NULL;
+	struct xnid *xnid;
+	xnkey_t id;
+
+	secondary_mode_only();
+
+	id = get_proto_id(protocol_family, socket_type);
+
+	mutex_lock(&register_lock);
+
+	xnid = xnid_fetch(&protocol_devices, id);
+	if (xnid) {
+		dev = container_of(xnid, struct rtdm_device, proto.id);
+		__rtdm_get_device(dev);
+	}
+
+	mutex_unlock(&register_lock);
+
+	return dev;
+}
+
+/**
+ * @ingroup rtdm_driver_interface
+ * @defgroup rtdm_device_register Device Registration Services
+ * @{
+ */
+
+static char *rtdm_devnode(struct device *dev, umode_t *mode)
+{
+	return kasprintf(GFP_KERNEL, "rtdm/%s", dev_name(dev));
+}
+
+static ssize_t profile_show(struct device *kdev,
+			    struct device_attribute *attr, char *buf)
+{
+	struct rtdm_device *dev = dev_get_drvdata(kdev);
+
+	return sprintf(buf, "%d,%d\n",
+		       dev->driver->profile_info.class_id,
+		       dev->driver->profile_info.subclass_id);
+}
+
+static ssize_t refcount_show(struct device *kdev,
+			     struct device_attribute *attr, char *buf)
+{
+	struct rtdm_device *dev = dev_get_drvdata(kdev);
+
+	return sprintf(buf, "%d\n", atomic_read(&dev->refcount));
+}
+
+#define cat_count(__buf, __str)			\
+	({					\
+		int __ret = sizeof(__str) - 1;	\
+		strcat(__buf, __str);		\
+		__ret;				\
+	})
+
+static ssize_t flags_show(struct device *kdev,
+			  struct device_attribute *attr, char *buf)
+{
+	struct rtdm_device *dev = dev_get_drvdata(kdev);
+	struct rtdm_driver *drv = dev->driver;
+
+	return sprintf(buf, "%#x\n", drv->device_flags);
+
+}
+
+static ssize_t type_show(struct device *kdev,
+			 struct device_attribute *attr, char *buf)
+{
+	struct rtdm_device *dev = dev_get_drvdata(kdev);
+	struct rtdm_driver *drv = dev->driver;
+	int ret;
+
+	if (drv->device_flags & RTDM_NAMED_DEVICE)
+		ret = cat_count(buf, "named\n");
+	else
+		ret = cat_count(buf, "protocol\n");
+
+	return ret;
+
+}
+
+#ifdef ATTRIBUTE_GROUPS
+
+static DEVICE_ATTR_RO(profile);
+static DEVICE_ATTR_RO(refcount);
+static DEVICE_ATTR_RO(flags);
+static DEVICE_ATTR_RO(type);
+
+static struct attribute *rtdm_attrs[] = {
+	&dev_attr_profile.attr,
+	&dev_attr_refcount.attr,
+	&dev_attr_flags.attr,
+	&dev_attr_type.attr,
+	NULL,
+};
+ATTRIBUTE_GROUPS(rtdm);
+
+#else /* !ATTRIBUTE_GROUPS */
+
+/*
+ * Cope with legacy sysfs attributes. Scheduled for removal when 3.10
+ * is at EOL for us.
+ */
+static struct device_attribute rtdm_attrs[] = {
+	DEVICE_ATTR_RO(profile),
+	DEVICE_ATTR_RO(refcount),
+	DEVICE_ATTR_RO(flags),
+	DEVICE_ATTR_RO(type),
+	__ATTR_NULL
+};
+
+#define dev_groups   dev_attrs
+#define rtdm_groups  rtdm_attrs
+
+#endif /* !ATTRIBUTE_GROUPS */
+
+static int state_change_notifier(struct notifier_block *nb,
+				 unsigned long action, void *data)
+{
+	struct rtdm_driver *drv;
+	int ret;
+
+	drv = container_of(nb, struct rtdm_driver, nb_statechange);
+
+	switch (action) {
+	case COBALT_STATE_WARMUP:
+		if (drv->smops.start == NULL)
+			return NOTIFY_DONE;
+		ret = drv->smops.start(drv);
+		if (ret)
+			printk(XENO_WARNING
+			       "failed starting driver %s (%d)\n",
+			       drv->profile_info.name, ret);
+		break;
+	case COBALT_STATE_TEARDOWN:
+		if (drv->smops.stop == NULL)
+			return NOTIFY_DONE;
+		ret = drv->smops.stop(drv);
+		if (ret)
+			printk(XENO_WARNING
+			       "failed stopping driver %s (%d)\n",
+			       drv->profile_info.name, ret);
+		break;
+	default:
+		return NOTIFY_DONE;
+	}
+
+	return NOTIFY_OK;
+}
+
+static int register_driver(struct rtdm_driver *drv)
+{
+	dev_t rdev;
+	int ret;
+
+	if (drv->profile_info.magic == RTDM_CLASS_MAGIC) {
+		atomic_inc(&drv->refcount);
+		return 0;
+	}
+
+	if (drv->profile_info.magic != ~RTDM_CLASS_MAGIC) {
+		XENO_WARN_ON_ONCE(COBALT, 1);
+		return -EINVAL;
+	}
+
+	switch (drv->device_flags & RTDM_DEVICE_TYPE_MASK) {
+	case RTDM_NAMED_DEVICE:
+	case RTDM_PROTOCOL_DEVICE:
+		break;
+	default:
+		printk(XENO_WARNING "%s has invalid device type (%#x)\n",
+		       drv->profile_info.name,
+		       drv->device_flags & RTDM_DEVICE_TYPE_MASK);
+		return -EINVAL;
+	}
+
+	if (drv->device_count <= 0 ||
+	    drv->device_count > RTDM_MAX_MINOR) {
+		printk(XENO_WARNING "%s has invalid device count (%d)\n",
+		       drv->profile_info.name, drv->device_count);
+		return -EINVAL;
+	}
+
+	if ((drv->device_flags & RTDM_NAMED_DEVICE) == 0)
+		goto done;
+
+	if (drv->base_minor < 0 ||
+	    drv->base_minor >= RTDM_MAX_MINOR) {
+		printk(XENO_WARNING "%s has invalid base minor (%d)\n",
+		       drv->profile_info.name, drv->base_minor);
+		return -EINVAL;
+	}
+
+	ret = alloc_chrdev_region(&rdev, drv->base_minor, drv->device_count,
+				  drv->profile_info.name);
+	if (ret) {
+		printk(XENO_WARNING "cannot allocate chrdev region %s[%d..%d]\n",
+		       drv->profile_info.name, drv->base_minor,
+		       drv->base_minor + drv->device_count - 1);
+		return ret;
+	}
+
+	cdev_init(&drv->named.cdev, &rtdm_dumb_fops);
+	ret = cdev_add(&drv->named.cdev, rdev, drv->device_count);
+	if (ret) {
+		printk(XENO_WARNING "cannot create cdev series for %s\n",
+		       drv->profile_info.name);
+		goto fail_cdev;
+	}
+
+	drv->named.major = MAJOR(rdev);
+	bitmap_zero(drv->minor_map, RTDM_MAX_MINOR);
+
+done:
+	atomic_set(&drv->refcount, 1);
+	drv->nb_statechange.notifier_call = state_change_notifier;
+	drv->nb_statechange.priority = 0;
+	cobalt_add_state_chain(&drv->nb_statechange);
+	drv->profile_info.magic = RTDM_CLASS_MAGIC;
+
+	return 0;
+
+fail_cdev:
+	unregister_chrdev_region(rdev, drv->device_count);
+
+	return ret;
+}
+
+static void unregister_driver(struct rtdm_driver *drv)
+{
+	XENO_BUG_ON(COBALT, drv->profile_info.magic != RTDM_CLASS_MAGIC);
+
+	if (!atomic_dec_and_test(&drv->refcount))
+		return;
+
+	cobalt_remove_state_chain(&drv->nb_statechange);
+
+	drv->profile_info.magic = ~RTDM_CLASS_MAGIC;
+
+	if (drv->device_flags & RTDM_NAMED_DEVICE) {
+		cdev_del(&drv->named.cdev);
+		unregister_chrdev_region(MKDEV(drv->named.major, drv->base_minor),
+					 drv->device_count);
+	}
+}
+
+/**
+ * @brief Register a RTDM device
+ *
+ * Registers a device in the RTDM namespace.
+ *
+ * @param[in] dev Device descriptor.
+ *
+ * @return 0 is returned upon success. Otherwise:
+ *
+ * - -EINVAL is returned if the descriptor contains invalid
+ * entries. RTDM_PROFILE_INFO() must appear in the list of
+ * initializers for the driver properties.
+ *
+ * - -EEXIST is returned if the specified device name or protocol ID is
+ * already in use.
+ *
+ * - -ENOMEM is returned if a memory allocation failed in the process
+ * of registering the device.
+ *
+ * - -EAGAIN is returned if no registry slot is available (check/raise
+ * CONFIG_XENO_OPT_REGISTRY_NRSLOTS).
+ *
+ * - -ENOSYS is returned if the real-time core is disabled.
+ *
+ * - -ENXIO is returned if no valid minor could be assigned.
+ *
+ * @coretags{secondary-only}
+ */
+int rtdm_dev_register(struct rtdm_device *dev)
+{
+	struct class *kdev_class = rtdm_class;
+	struct device *kdev = NULL;
+	struct rtdm_driver *drv;
+	int ret, major, minor;
+	xnkey_t id;
+	dev_t rdev;
+	const char *dev_name;
+
+	secondary_mode_only();
+
+	if (!realtime_core_enabled())
+		return -ENOSYS;
+
+	mutex_lock(&register_lock);
+
+	dev->name = NULL;
+	drv = dev->driver;
+	ret = register_driver(drv);
+	if (ret) {
+		mutex_unlock(&register_lock);
+		return ret;
+	}
+
+	dev->ops = drv->ops;
+	if (drv->device_flags & RTDM_NAMED_DEVICE)
+		dev->ops.socket = (typeof(dev->ops.socket))enosys;
+	else
+		dev->ops.open = (typeof(dev->ops.open))enosys;
+
+	INIT_LIST_HEAD(&dev->openfd_list);
+	init_waitqueue_head(&dev->putwq);
+	dev->ops.close = __rtdm_dev_close; /* Interpose on driver's handler. */
+	atomic_set(&dev->refcount, 0);
+
+	if (drv->profile_info.kdev_class)
+		kdev_class = drv->profile_info.kdev_class;
+
+	if (drv->device_flags & RTDM_NAMED_DEVICE) {
+		if (drv->device_flags & RTDM_FIXED_MINOR) {
+			minor = dev->minor;
+			if (minor < 0 ||
+			    minor >= drv->base_minor + drv->device_count) {
+				ret = -ENXIO;
+				goto fail;
+			}
+		} else {
+			minor = find_first_zero_bit(drv->minor_map, RTDM_MAX_MINOR);
+			if (minor >= RTDM_MAX_MINOR) {
+				ret = -ENXIO;
+				goto fail;
+			}
+			dev->minor = minor;
+		}
+
+		major = drv->named.major;
+		dev->name = kasformat(dev->label, minor);
+		if (dev->name == NULL) {
+			ret = -ENOMEM;
+			goto fail;
+		}
+		if (dev->name[0] == '/') {
+			dev_name = dev->name+1;
+		} else {
+			dev_name = dev->name;
+		}
+		ret = xnregistry_enter(dev_name, dev,
+				       &dev->named.handle, NULL);
+		if (ret)
+			goto fail;
+
+		rdev = MKDEV(major, minor);
+		kdev = device_create(kdev_class, NULL, rdev,
+				     dev, kbasename(dev->label), minor);
+		if (IS_ERR(kdev)) {
+			xnregistry_remove(dev->named.handle);
+			ret = PTR_ERR(kdev);
+			goto fail2;
+		}
+		__set_bit(minor, drv->minor_map);
+	} else {
+		minor = find_first_zero_bit(protocol_devices_minor_map,
+					RTDM_MAX_MINOR);
+		if (minor >= RTDM_MAX_MINOR) {
+			ret = -ENXIO;
+			goto fail;
+		}
+		dev->minor = minor;
+
+		dev->name = kstrdup(dev->label, GFP_KERNEL);
+		if (dev->name == NULL) {
+			ret = -ENOMEM;
+			goto fail;
+		}
+
+		rdev = MKDEV(0, minor);
+		kdev = device_create(kdev_class, NULL, rdev,
+				     dev, dev->name);
+		if (IS_ERR(kdev)) {
+			ret = PTR_ERR(kdev);
+			goto fail2;
+		}
+
+		id = get_proto_id(drv->protocol_family, drv->socket_type);
+		ret = xnid_enter(&protocol_devices, &dev->proto.id, id);
+		if (ret < 0)
+			goto fail;
+		__set_bit(minor, protocol_devices_minor_map);
+	}
+
+	dev->rdev = rdev;
+	dev->kdev = kdev;
+	dev->magic = RTDM_DEVICE_MAGIC;
+	dev->kdev_class = kdev_class;
+
+	mutex_unlock(&register_lock);
+
+	trace_cobalt_device_register(dev);
+
+	return 0;
+fail:
+	if (kdev)
+		device_destroy(kdev_class, rdev);
+fail2:
+	unregister_driver(drv);
+
+	mutex_unlock(&register_lock);
+
+	if (dev->name)
+		kfree(dev->name);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(rtdm_dev_register);
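+
+/*
+ * Usage sketch (illustrative only, not part of this file): a minimal
+ * named-device driver registering a single device from its module
+ * init routine. All foo_* names are hypothetical.
+ *
+ *	static struct rtdm_driver foo_driver = {
+ *		.profile_info = RTDM_PROFILE_INFO(foo,
+ *						  RTDM_CLASS_EXPERIMENTAL,
+ *						  0, 1),
+ *		.device_flags = RTDM_NAMED_DEVICE,
+ *		.device_count = 1,
+ *		.context_size = sizeof(struct foo_context),
+ *		.ops = {
+ *			.open = foo_open,
+ *			.close = foo_close,
+ *			.ioctl_nrt = foo_ioctl,
+ *		},
+ *	};
+ *
+ *	static struct rtdm_device foo_device = {
+ *		.driver = &foo_driver,
+ *		.label = "foo%d",
+ *	};
+ *
+ *	ret = rtdm_dev_register(&foo_device);	(secondary mode only)
+ */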
+
+/**
+ * @brief Unregister a RTDM device
+ *
+ * Removes the device from the RTDM namespace. This routine first
+ * attempts to tear down all active connections to @a dev prior
+ * to unregistering.
+ *
+ * @param[in] dev Device descriptor.
+ *
+ * @coretags{secondary-only}
+ */
+void rtdm_dev_unregister(struct rtdm_device *dev)
+{
+	struct rtdm_driver *drv = dev->driver;
+
+	secondary_mode_only();
+
+	trace_cobalt_device_unregister(dev);
+
+	/* Lock out any further connection. */
+	dev->magic = ~RTDM_DEVICE_MAGIC;
+
+	/* Flush all fds from this device. */
+	rtdm_device_flush_fds(dev);
+
+	/* Then wait for the ongoing connections to finish. */
+	wait_event(dev->putwq,
+		   atomic_read(&dev->refcount) == 0);
+
+	mutex_lock(&register_lock);
+
+	if (drv->device_flags & RTDM_NAMED_DEVICE) {
+		xnregistry_remove(dev->named.handle);
+		__clear_bit(dev->minor, drv->minor_map);
+	} else {
+		xnid_remove(&protocol_devices, &dev->proto.id);
+		__clear_bit(dev->minor, protocol_devices_minor_map);
+	}
+
+	device_destroy(dev->kdev_class, dev->rdev);
+
+	unregister_driver(drv);
+
+	mutex_unlock(&register_lock);
+
+	kfree(dev->name);
+}
+EXPORT_SYMBOL_GPL(rtdm_dev_unregister);
+
+/**
+ * @brief Set the kernel device class of a RTDM driver.
+ *
+ * Set the kernel device class assigned to the RTDM driver. By
+ * default, RTDM drivers belong to Linux's "rtdm" device class,
+ * creating a device node hierarchy rooted at /dev/rtdm, and sysfs
+ * nodes under /sys/class/rtdm.
+ *
+ * This call assigns a user-defined kernel device class to the RTDM
+ * driver, so that its devices are created into a different system
+ * hierarchy.
+ *
+ * rtdm_drv_set_sysclass() is meaningful only before the first device
+ * which is attached to @a drv is registered by a call to
+ * rtdm_dev_register().
+ *
+ * @param[in] drv Address of the RTDM driver descriptor.
+ *
+ * @param[in] cls Pointer to the kernel device class. NULL is allowed
+ * to clear a previous setting, switching back to the default "rtdm"
+ * device class.
+ *
+ * @return 0 on success, otherwise:
+ *
+ * - -EBUSY is returned if the kernel device class has already been
+ * set for @a drv, or some device(s) attached to @a drv are currently
+ * registered.
+ *
+ * @coretags{task-unrestricted}
+ *
+ * @attention The kernel device class set by this call is not related to
+ * the RTDM class identification as defined by the @ref rtdm_profiles
+ * "RTDM profiles" in any way. This is strictly related to the Linux
+ * kernel device hierarchy.
+ */
+int rtdm_drv_set_sysclass(struct rtdm_driver *drv, struct class *cls)
+{
+	if ((cls && drv->profile_info.kdev_class) ||
+	    atomic_read(&drv->refcount))
+		return -EBUSY;
+
+	drv->profile_info.kdev_class = cls;
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(rtdm_drv_set_sysclass);
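+
+/*
+ * Usage sketch (illustrative only, not part of this file): moving a
+ * driver's devices under a dedicated "foo" device class before the
+ * first device attached to foo_driver (hypothetical) is registered.
+ *
+ *	static struct class *foo_class;
+ *
+ *	foo_class = class_create(THIS_MODULE, "foo");
+ *	if (IS_ERR(foo_class))
+ *		return PTR_ERR(foo_class);
+ *
+ *	ret = rtdm_drv_set_sysclass(&foo_driver, foo_class);
+ *	if (ret)
+ *		return ret;
+ *
+ *	Devices registered afterwards show up under /sys/class/foo.
+ */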
+
+/** @} */
+
+int __init rtdm_init(void)
+{
+	xntree_init(&protocol_devices);
+
+	rtdm_class = class_create(THIS_MODULE, "rtdm");
+	if (IS_ERR(rtdm_class)) {
+		printk(XENO_ERR "cannot create RTDM sysfs class\n");
+		return PTR_ERR(rtdm_class);
+	}
+	rtdm_class->dev_groups = rtdm_groups;
+	rtdm_class->devnode = rtdm_devnode;
+
+	bitmap_zero(protocol_devices_minor_map, RTDM_MAX_MINOR);
+
+	return 0;
+}
+
+void rtdm_cleanup(void)
+{
+	class_destroy(rtdm_class);
+	/*
+	 * NOTE: no need to flush the cleanup_queue as no device is
+	 * allowed to unregister as long as there are references.
+	 */
+}
+
+/** @} */
+++ linux-patched/kernel/xenomai/rtdm/wrappers.c	2022-03-21 12:58:29.097891809 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/kernel/xenomai/posix/mqueue.h	1970-01-01 01:00:00.000000000 +0100
+/*
+ * Copyright (c) 2013  Hauke Mehrtens <hauke@hauke-m.de>
+ * Copyright (c) 2013  Hannes Frederic Sowa <hannes@stressinduktion.org>
+ * Copyright (c) 2014  Luis R. Rodriguez <mcgrof@do-not-panic.com>
+ *
+ * Backport functionality introduced in Linux 3.13.
+ *
+ * Copyright (c) 2014  Hauke Mehrtens <hauke@hauke-m.de>
+ *
+ * Backport functionality introduced in Linux 3.14.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <linux/version.h>
+#include <linux/kernel.h>
+#include <linux/pci.h>
+#include <linux/hwmon.h>
+#include <asm/xenomai/wrappers.h>
+
+/*
+ * Same rules as kernel/cobalt/include/asm-generic/xenomai/wrappers.h
+ * apply to reduce #ifdefery.
+ */
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(3,14,0)
+#ifdef CONFIG_PCI_MSI
+int pci_enable_msix_range(struct pci_dev *dev,
+			struct msix_entry *entries,
+			int minvec, int maxvec)
+{
+	int nvec = maxvec;
+	int rc;
+
+	if (maxvec < minvec)
+		return -ERANGE;
+
+	do {
+		rc = pci_enable_msix(dev, entries, nvec);
+		if (rc < 0) {
+			return rc;
+		} else if (rc > 0) {
+			if (rc < minvec)
+				return -ENOSPC;
+			nvec = rc;
+		}
+	} while (rc);
+
+	return nvec;
+}
+EXPORT_SYMBOL(pci_enable_msix_range);
+#endif
+#endif /* < 3.14 */
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(3,13,0)
+#ifdef CONFIG_HWMON
+struct device*
+hwmon_device_register_with_groups(struct device *dev, const char *name,
+				void *drvdata,
+				const struct attribute_group **groups)
+{
+	struct device *hwdev;
+
+	hwdev = hwmon_device_register(dev);
+	hwdev->groups = groups;
+	dev_set_drvdata(hwdev, drvdata);
+	return hwdev;
+}
+
+static void devm_hwmon_release(struct device *dev, void *res)
+{
+	struct device *hwdev = *(struct device **)res;
+
+	hwmon_device_unregister(hwdev);
+}
+
+struct device *
+devm_hwmon_device_register_with_groups(struct device *dev, const char *name,
+				void *drvdata,
+				const struct attribute_group **groups)
+{
+	struct device **ptr, *hwdev;
+
+	if (!dev)
+		return ERR_PTR(-EINVAL);
+
+	ptr = devres_alloc(devm_hwmon_release, sizeof(*ptr), GFP_KERNEL);
+	if (!ptr)
+		return ERR_PTR(-ENOMEM);
+
+	hwdev = hwmon_device_register_with_groups(dev, name, drvdata, groups);
+	if (IS_ERR(hwdev))
+		goto error;
+
+	*ptr = hwdev;
+	devres_add(dev, ptr);
+	return hwdev;
+
+error:
+	devres_free(ptr);
+	return hwdev;
+}
+EXPORT_SYMBOL_GPL(devm_hwmon_device_register_with_groups);
+#endif
+#endif /* < 3.13 */
+++ linux-patched/kernel/xenomai/posix/mqueue.h	2022-03-21 12:58:29.092891858 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/kernel/xenomai/posix/sched.h	1970-01-01 01:00:00.000000000 +0100
+/*
+ * Written by Gilles Chanteperdrix <gilles.chanteperdrix@xenomai.org>.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+#ifndef _COBALT_POSIX_MQUEUE_H
+#define _COBALT_POSIX_MQUEUE_H
+
+#include <linux/types.h>
+#include <linux/fcntl.h>
+#include <xenomai/posix/syscall.h>
+
+struct mq_attr {
+	long mq_flags;
+	long mq_maxmsg;
+	long mq_msgsize;
+	long mq_curmsgs;
+};
+
+int __cobalt_mq_open(const char __user *u_name, int oflags,
+		     mode_t mode, struct mq_attr *attr);
+
+int __cobalt_mq_getattr(mqd_t uqd, struct mq_attr *attr);
+
+int __cobalt_mq_timedsend(mqd_t uqd, const void __user *u_buf, size_t len,
+			  unsigned int prio, const void __user *u_ts,
+			  int (*fetch_timeout)(struct timespec64 *ts,
+					       const void __user *u_ts));
+
+int __cobalt_mq_timedsend64(mqd_t uqd, const void __user *u_buf, size_t len,
+			    unsigned int prio, const void __user *u_ts);
+
+int __cobalt_mq_timedreceive(mqd_t uqd, void __user *u_buf,
+			     ssize_t *lenp,
+			     unsigned int __user *u_prio,
+			     const void __user *u_ts,
+			     int (*fetch_timeout)(struct timespec64 *ts,
+						  const void __user *u_ts));
+
+int __cobalt_mq_timedreceive64(mqd_t uqd, void __user *u_buf,
+			       ssize_t __user *u_len,
+			       unsigned int __user *u_prio,
+			       const void __user *u_ts);
+
+int __cobalt_mq_notify(mqd_t fd, const struct sigevent *evp);
+
+COBALT_SYSCALL_DECL(mq_open,
+		    (const char __user *u_name, int oflags,
+		     mode_t mode, struct mq_attr __user *u_attr));
+
+COBALT_SYSCALL_DECL(mq_close, (mqd_t uqd));
+
+COBALT_SYSCALL_DECL(mq_unlink, (const char __user *u_name));
+
+COBALT_SYSCALL_DECL(mq_getattr, (mqd_t uqd, struct mq_attr __user *u_attr));
+
+COBALT_SYSCALL_DECL(mq_timedsend,
+		    (mqd_t uqd, const void __user *u_buf, size_t len,
+		     unsigned int prio, const struct __user_old_timespec __user *u_ts));
+
+COBALT_SYSCALL_DECL(mq_timedsend64,
+		    (mqd_t uqd, const void __user *u_buf, size_t len,
+		     unsigned int prio,
+		     const struct __kernel_timespec __user *u_ts));
+
+COBALT_SYSCALL_DECL(mq_timedreceive,
+		    (mqd_t uqd, void __user *u_buf, ssize_t __user *u_len,
+		     unsigned int __user *u_prio,
+		     const struct __user_old_timespec __user *u_ts));
+
+COBALT_SYSCALL_DECL(mq_timedreceive64,
+		    (mqd_t uqd, void __user *u_buf, ssize_t __user *u_len,
+		     unsigned int __user *u_prio,
+		     const struct __kernel_timespec __user *u_ts));
+
+COBALT_SYSCALL_DECL(mq_notify,
+		    (mqd_t fd, const struct sigevent *__user evp));
+
+#endif /* !_COBALT_POSIX_MQUEUE_H */
+++ linux-patched/kernel/xenomai/posix/sched.h	2022-03-21 12:58:29.088891896 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/kernel/xenomai/posix/internal.h	1970-01-01 01:00:00.000000000 +0100
+/*
+ * Copyright (C) 2009 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+#ifndef _COBALT_POSIX_SCHED_H
+#define _COBALT_POSIX_SCHED_H
+
+#include <linux/list.h>
+#include <cobalt/kernel/sched.h>
+#include <xenomai/posix/syscall.h>
+
+struct cobalt_resources;
+struct cobalt_process;
+
+struct cobalt_sched_group {
+#ifdef CONFIG_XENO_OPT_SCHED_QUOTA
+	struct xnsched_quota_group quota;
+#endif
+	struct cobalt_resources *scope;
+	int pshared;
+	struct list_head next;
+};
+
+int __cobalt_sched_weightprio(int policy,
+			      const struct sched_param_ex *param_ex);
+
+int __cobalt_sched_setconfig_np(int cpu, int policy,
+				void __user *u_config,
+				size_t len,
+				union sched_config *(*fetch_config)
+				(int policy, const void __user *u_config,
+				 size_t *len),
+				int (*ack_config)(int policy,
+						  const union sched_config *config,
+						  void __user *u_config));
+
+ssize_t __cobalt_sched_getconfig_np(int cpu, int policy,
+				    void __user *u_config,
+				    size_t len,
+				    union sched_config *(*fetch_config)
+				    (int policy, const void __user *u_config,
+				     size_t *len),
+				    ssize_t (*put_config)(int policy,
+							  void __user *u_config, size_t u_len,
+							  const union sched_config *config,
+							  size_t len));
+int cobalt_sched_setscheduler_ex(pid_t pid,
+				 int policy,
+				 const struct sched_param_ex *param_ex,
+				 __u32 __user *u_winoff,
+				 int __user *u_promoted);
+
+int cobalt_sched_getscheduler_ex(pid_t pid,
+				 int *policy_r,
+				 struct sched_param_ex *param_ex);
+
+struct xnsched_class *
+cobalt_sched_policy_param(union xnsched_policy_param *param,
+			  int u_policy, const struct sched_param_ex *param_ex,
+			  xnticks_t *tslice_r);
+
+COBALT_SYSCALL_DECL(sched_yield, (void));
+
+COBALT_SYSCALL_DECL(sched_weightprio,
+		    (int policy, const struct sched_param_ex __user *u_param));
+
+COBALT_SYSCALL_DECL(sched_minprio, (int policy));
+
+COBALT_SYSCALL_DECL(sched_maxprio, (int policy));
+
+COBALT_SYSCALL_DECL(sched_setconfig_np,
+		    (int cpu,
+		     int policy,
+		     union sched_config __user *u_config,
+		     size_t len));
+
+COBALT_SYSCALL_DECL(sched_getconfig_np,
+		    (int cpu, int policy,
+		     union sched_config __user *u_config,
+		     size_t len));
+
+COBALT_SYSCALL_DECL(sched_setscheduler_ex,
+		    (pid_t pid,
+		     int policy,
+		     const struct sched_param_ex __user *u_param,
+		     __u32 __user *u_winoff,
+		     int __user *u_promoted));
+
+COBALT_SYSCALL_DECL(sched_getscheduler_ex,
+		    (pid_t pid,
+		     int __user *u_policy,
+		     struct sched_param_ex __user *u_param));
+
+void cobalt_sched_reclaim(struct cobalt_process *process);
+
+#endif /* !_COBALT_POSIX_SCHED_H */
+++ linux-patched/kernel/xenomai/posix/internal.h	2022-03-21 12:58:29.085891926 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/kernel/xenomai/posix/mutex.h	1970-01-01 01:00:00.000000000 +0100
+/*
+ * Written by Gilles Chanteperdrix <gilles.chanteperdrix@xenomai.org>.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+#ifndef _COBALT_POSIX_INTERNAL_H
+#define _COBALT_POSIX_INTERNAL_H
+
+#include <cobalt/kernel/sched.h>
+#include <cobalt/kernel/heap.h>
+#include <cobalt/kernel/ppd.h>
+#include <cobalt/kernel/assert.h>
+#include <cobalt/kernel/list.h>
+#include <cobalt/kernel/arith.h>
+#include <asm/xenomai/syscall.h>
+#include "process.h"
+#include "extension.h"
+#include "syscall.h"
+#include "memory.h"
+
+#define COBALT_MAXNAME		64
+#define COBALT_PERMS_MASK	(O_RDONLY | O_WRONLY | O_RDWR)
+
+#define COBALT_MAGIC(n)		(0x8686##n##n)
+#define COBALT_ANY_MAGIC	COBALT_MAGIC(00)
+#define COBALT_THREAD_MAGIC	COBALT_MAGIC(01)
+#define COBALT_MQ_MAGIC		COBALT_MAGIC(0A)
+#define COBALT_MQD_MAGIC	COBALT_MAGIC(0B)
+#define COBALT_EVENT_MAGIC	COBALT_MAGIC(0F)
+#define COBALT_MONITOR_MAGIC	COBALT_MAGIC(10)
+#define COBALT_TIMERFD_MAGIC	COBALT_MAGIC(11)
+
+#define cobalt_obj_active(h,m,t)	\
+	((h) && ((t *)(h))->magic == (m))
+
+#define cobalt_mark_deleted(t) ((t)->magic = ~(t)->magic)
+
+extern struct xnptree posix_ptree;
+
+static inline xnhandle_t cobalt_get_handle_from_user(xnhandle_t *u_h)
+{
+	xnhandle_t handle;
+	return __xn_get_user(handle, u_h) ? 0 : handle;
+}
+
+int cobalt_init(void);
+
+long cobalt_restart_syscall_placeholder(struct restart_block *param);
+
+#endif /* !_COBALT_POSIX_INTERNAL_H */
+++ linux-patched/kernel/xenomai/posix/mutex.h	2022-03-21 12:58:29.081891965 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/kernel/xenomai/posix/signal.h	1970-01-01 01:00:00.000000000 +0100
+/*
+ * Written by Gilles Chanteperdrix <gilles.chanteperdrix@xenomai.org>.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+#ifndef _COBALT_POSIX_MUTEX_H
+#define _COBALT_POSIX_MUTEX_H
+
+#include "thread.h"
+#include <cobalt/uapi/mutex.h>
+#include <xenomai/posix/syscall.h>
+#include <xenomai/posix/process.h>
+
+struct cobalt_process;
+
+struct cobalt_mutex {
+	unsigned int magic;
+	struct xnsynch synchbase;
+	/** cobalt_mutexq */
+	struct list_head conds;
+	struct cobalt_mutexattr attr;
+	struct cobalt_resnode resnode;
+};
+
+int __cobalt_mutex_timedlock_break(struct cobalt_mutex_shadow __user *u_mx,
+				   const void __user *u_ts,
+				   int (*fetch_timeout)(struct timespec64 *ts,
+							const void __user *u_ts));
+
+int __cobalt_mutex_timedlock64(struct cobalt_mutex_shadow __user *u_mx,
+				   const void __user *u_ts);
+
+int __cobalt_mutex_acquire_unchecked(struct xnthread *cur,
+				     struct cobalt_mutex *mutex,
+				     const struct timespec64 *ts);
+
+COBALT_SYSCALL_DECL(mutex_check_init,
+		    (struct cobalt_mutex_shadow __user *u_mx));
+
+COBALT_SYSCALL_DECL(mutex_init,
+		    (struct cobalt_mutex_shadow __user *u_mx,
+		     const struct cobalt_mutexattr __user *u_attr));
+
+COBALT_SYSCALL_DECL(mutex_destroy,
+		    (struct cobalt_mutex_shadow __user *u_mx));
+
+COBALT_SYSCALL_DECL(mutex_trylock,
+		    (struct cobalt_mutex_shadow __user *u_mx));
+
+COBALT_SYSCALL_DECL(mutex_lock,
+		    (struct cobalt_mutex_shadow __user *u_mx));
+
+COBALT_SYSCALL_DECL(mutex_timedlock,
+		    (struct cobalt_mutex_shadow __user *u_mx,
+		     const struct __user_old_timespec __user *u_ts));
+
+COBALT_SYSCALL_DECL(mutex_timedlock64,
+		    (struct cobalt_mutex_shadow __user *u_mx,
+		     const struct __kernel_timespec __user *u_ts));
+
+COBALT_SYSCALL_DECL(mutex_unlock,
+		    (struct cobalt_mutex_shadow __user *u_mx));
+
+int cobalt_mutex_release(struct xnthread *cur,
+			 struct cobalt_mutex *mutex);
+
+void cobalt_mutex_reclaim(struct cobalt_resnode *node,
+			  spl_t s);
+
+#endif /* !_COBALT_POSIX_MUTEX_H */
+++ linux-patched/kernel/xenomai/posix/signal.h	2022-03-21 12:58:29.078891994 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/kernel/xenomai/posix/timerfd.h	1970-01-01 01:00:00.000000000 +0100
+/*
+ * Copyright (C) 2013 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+#ifndef _COBALT_POSIX_SIGNAL_H
+#define _COBALT_POSIX_SIGNAL_H
+
+#include <linux/signal.h>
+#include <cobalt/kernel/timer.h>
+#include <cobalt/kernel/list.h>
+#include <cobalt/uapi/signal.h>
+#include <xenomai/posix/syscall.h>
+
+struct cobalt_thread;
+
+struct cobalt_sigpending {
+	struct siginfo si;
+	struct list_head next;
+};
+
+static inline
+void cobalt_copy_siginfo(int code,
+			 struct siginfo *__restrict__ dst,
+			 const struct siginfo *__restrict__ src)
+{
+	dst->si_signo = src->si_signo;
+	dst->si_errno = src->si_errno;
+	dst->si_code = code;
+
+	switch (code) {
+	case SI_TIMER:
+		dst->si_tid = src->si_tid;
+		dst->si_overrun = src->si_overrun;
+		dst->si_value = src->si_value;
+		break;
+	case SI_QUEUE:
+	case SI_MESGQ:
+		dst->si_value = src->si_value;
+		fallthrough;
+	case SI_USER:
+		dst->si_pid = src->si_pid;
+		dst->si_uid = src->si_uid;
+	}
+}
+
+int __cobalt_sigwait(sigset_t *set);
+
+int __cobalt_sigtimedwait(sigset_t *set,
+			  const struct timespec64 *timeout,
+			  void __user *u_si,
+			  bool compat);
+
+int __cobalt_sigwaitinfo(sigset_t *set,
+			 void __user *u_si,
+			 bool compat);
+
+int __cobalt_sigqueue(pid_t pid, int sig, const union sigval *value);
+
+int cobalt_signal_send(struct cobalt_thread *thread,
+		       struct cobalt_sigpending *sigp,
+		       int group);
+
+int cobalt_signal_send_pid(pid_t pid,
+			   struct cobalt_sigpending *sigp);
+
+struct cobalt_sigpending *cobalt_signal_alloc(void);
+
+void cobalt_signal_free(struct cobalt_sigpending *sigp);
+
+void cobalt_signal_flush(struct cobalt_thread *thread);
+
+int cobalt_signal_wait(sigset_t *set, struct siginfo *si,
+		       xnticks_t timeout, xntmode_t tmode);
+
+int __cobalt_kill(struct cobalt_thread *thread,
+		  int sig, int group);
+
+COBALT_SYSCALL_DECL(sigwait,
+		    (const sigset_t __user *u_set, int __user *u_sig));
+
+COBALT_SYSCALL_DECL(sigtimedwait,
+		    (const sigset_t __user *u_set,
+		     struct siginfo __user *u_si,
+		     const struct __user_old_timespec __user *u_timeout));
+
+COBALT_SYSCALL_DECL(sigtimedwait64,
+		    (const sigset_t __user *u_set,
+		     struct siginfo __user *u_si,
+		     const struct __kernel_timespec __user *u_timeout));
+
+COBALT_SYSCALL_DECL(sigwaitinfo,
+		    (const sigset_t __user *u_set,
+		     struct siginfo __user *u_si));
+
+COBALT_SYSCALL_DECL(sigpending,
+		    (old_sigset_t __user *u_set));
+
+COBALT_SYSCALL_DECL(kill, (pid_t pid, int sig));
+
+COBALT_SYSCALL_DECL(sigqueue,
+		    (pid_t pid, int sig, const union sigval __user *u_value));
+
+int cobalt_signal_init(void);
+
+void cobalt_signal_cleanup(void);
+
+#endif /* !_COBALT_POSIX_SIGNAL_H */
+++ linux-patched/kernel/xenomai/posix/timerfd.h	2022-03-21 12:58:29.074892033 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/kernel/xenomai/posix/process.h	1970-01-01 01:00:00.000000000 +0100
+/*
+ * Copyright (C) 2014 Gilles Chanteperdrix <gilles.chanteperdrix@xenomai.org>.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+#ifndef TIMERFD_H
+#define TIMERFD_H
+
+#include <linux/time.h>
+#include <xenomai/posix/syscall.h>
+
+int __cobalt_timerfd_settime(int fd, int flags,
+			     const struct itimerspec64 *new_value,
+			     struct itimerspec64 *old_value);
+
+int __cobalt_timerfd_gettime(int fd,
+			     struct itimerspec64 *value);
+
+COBALT_SYSCALL_DECL(timerfd_create,
+		    (int clockid, int flags));
+
+COBALT_SYSCALL_DECL(timerfd_settime,
+		    (int fd, int flags,
+		     const struct __user_old_itimerspec __user *new_value,
+		     struct __user_old_itimerspec __user *old_value));
+
+COBALT_SYSCALL_DECL(timerfd_gettime,
+		    (int fd, struct __user_old_itimerspec __user *curr_value));
+
+#endif /* TIMERFD_H */
+++ linux-patched/kernel/xenomai/posix/process.h	2022-03-21 12:58:29.071892062 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/kernel/xenomai/posix/sched.c	1970-01-01 01:00:00.000000000 +0100
+/*
+ * Copyright (C) 2013 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+#ifndef _COBALT_POSIX_PROCESS_H
+#define _COBALT_POSIX_PROCESS_H
+
+#include <linux/list.h>
+#include <linux/bitmap.h>
+#include <pipeline/thread.h>
+#include <cobalt/kernel/ppd.h>
+
+#define NR_PERSONALITIES  4
+#if BITS_PER_LONG < NR_PERSONALITIES
+#error "NR_PERSONALITIES overflows internal bitmap"
+#endif
+
+struct mm_struct;
+struct xnthread_personality;
+struct cobalt_timer;
+
+struct cobalt_resources {
+	struct list_head condq;
+	struct list_head mutexq;
+	struct list_head semq;
+	struct list_head monitorq;
+	struct list_head eventq;
+	struct list_head schedq;
+};
+
+struct cobalt_process {
+	struct mm_struct *mm;
+	struct hlist_node hlink;
+	struct cobalt_ppd sys_ppd;
+	unsigned long permap;
+	struct rb_root usems;
+	struct list_head sigwaiters;
+	struct cobalt_resources resources;
+	struct list_head thread_list;
+	DECLARE_BITMAP(timers_map, CONFIG_XENO_OPT_NRTIMERS);
+	struct cobalt_timer *timers[CONFIG_XENO_OPT_NRTIMERS];
+	void *priv[NR_PERSONALITIES];
+	int ufeatures;
+	unsigned int debugged_threads;
+};
+
+struct cobalt_resnode {
+	struct cobalt_resources *scope;
+	struct cobalt_process *owner;
+	struct list_head next;
+	xnhandle_t handle;
+};
+
+int cobalt_register_personality(struct xnthread_personality *personality);
+
+int cobalt_unregister_personality(int xid);
+
+struct xnthread_personality *cobalt_push_personality(int xid);
+
+void cobalt_pop_personality(struct xnthread_personality *prev);
+
+int cobalt_bind_core(int ufeatures);
+
+int cobalt_bind_personality(unsigned int magic);
+
+struct cobalt_process *cobalt_search_process(struct mm_struct *mm);
+
+int cobalt_map_user(struct xnthread *thread, __u32 __user *u_winoff);
+
+void *cobalt_get_context(int xid);
+
+int cobalt_yield(xnticks_t min, xnticks_t max);
+
+int cobalt_process_init(void);
+
+extern struct list_head cobalt_global_thread_list;
+
+extern struct cobalt_resources cobalt_global_resources;
+
+static inline struct cobalt_process *cobalt_current_process(void)
+{
+	return pipeline_current()->process;
+}
+
+static inline struct cobalt_process *
+cobalt_set_process(struct cobalt_process *process)
+{
+	struct cobalt_threadinfo *p = pipeline_current();
+	struct cobalt_process *old;
+
+	old = p->process;
+	p->process = process;
+
+	return old;
+}
+
+static inline struct cobalt_ppd *cobalt_ppd_get(int global)
+{
+	struct cobalt_process *process;
+
+	if (global || (process = cobalt_current_process()) == NULL)
+		return &cobalt_kernel_ppd;
+
+	return &process->sys_ppd;
+}
+
+static inline struct cobalt_resources *cobalt_current_resources(int pshared)
+{
+	struct cobalt_process *process;
+
+	if (pshared || (process = cobalt_current_process()) == NULL)
+		return &cobalt_global_resources;
+
+	return &process->resources;
+}
+
+static inline
+void __cobalt_add_resource(struct cobalt_resnode *node, int pshared)
+{
+	node->owner = cobalt_current_process();
+	node->scope = cobalt_current_resources(pshared);
+}
+
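+/*
+ * Queue a resource node on the matching list of its resolved scope;
+ * __type is pasted with "q" to pick the list head (e.g. mutex -> mutexq).
+ */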
+#define cobalt_add_resource(__node, __type, __pshared)			\
+	do {								\
+		__cobalt_add_resource(__node, __pshared);		\
+		list_add_tail(&(__node)->next,				\
+			      &((__node)->scope)->__type ## q);		\
+	} while (0)
+
+static inline
+void cobalt_del_resource(struct cobalt_resnode *node)
+{
+	list_del(&node->next);
+}
+
+void cobalt_remove_process(struct cobalt_process *process);
+
+void cobalt_signal_yield(void);
+
+void cobalt_stop_debugged_process(struct xnthread *thread);
+
+void cobalt_register_debugged_thread(struct xnthread *thread);
+
+void cobalt_unregister_debugged_thread(struct xnthread *thread);
+
+extern struct xnthread_personality *cobalt_personalities[];
+
+extern struct xnthread_personality cobalt_personality;
+
+int cobalt_handle_setaffinity_event(struct task_struct *task);
+
+#ifdef CONFIG_SMP
+void cobalt_adjust_affinity(struct task_struct *task);
+#else
+static inline void cobalt_adjust_affinity(struct task_struct *task) { }
+#endif
+
+int cobalt_handle_taskexit_event(struct task_struct *task);
+
+int cobalt_handle_cleanup_event(struct mm_struct *mm);
+
+int cobalt_handle_user_return(struct task_struct *task);
+
+#endif /* !_COBALT_POSIX_PROCESS_H */
+++ linux-patched/kernel/xenomai/posix/sched.c	2022-03-21 12:58:29.067892101 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/kernel/xenomai/posix/sem.h	1970-01-01 01:00:00.000000000 +0100
+/*
+ * Copyright (C) 2009 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+#include <linux/types.h>
+#include "internal.h"
+#include "thread.h"
+#include "sched.h"
+#include "clock.h"
+#include <trace/events/cobalt-posix.h>
+
+struct xnsched_class *
+cobalt_sched_policy_param(union xnsched_policy_param *param,
+			  int u_policy, const struct sched_param_ex *param_ex,
+			  xnticks_t *tslice_r)
+{
+	struct xnsched_class *sched_class;
+	int prio, policy;
+	xnticks_t tslice;
+
+	prio = param_ex->sched_priority;
+	tslice = XN_INFINITE;
+	policy = u_policy;
+
+	/*
+	 * NOTE: The user-defined policy may differ from ours,
+	 * e.g. SCHED_FIFO,prio=-7 from userland would be interpreted
+	 * as SCHED_WEAK,prio=7 in kernel space.
+	 */
+	if (prio < 0) {
+		prio = -prio;
+		policy = SCHED_WEAK;
+	}
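+	/* Default to the RT class; the policy switch below may override it. */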
+	sched_class = &xnsched_class_rt;
+	param->rt.prio = prio;
+
+	switch (policy) {
+	case SCHED_NORMAL:
+		if (prio)
+			return NULL;
+		/*
+		 * When the weak scheduling class is compiled in,
+		 * SCHED_WEAK and SCHED_NORMAL threads are scheduled
+		 * by xnsched_class_weak, at their respective priority
+		 * levels. Otherwise, SCHED_NORMAL is scheduled by
+		 * xnsched_class_rt at priority level #0.
+		 */
+		fallthrough;
+	case SCHED_WEAK:
+#ifdef CONFIG_XENO_OPT_SCHED_WEAK
+		if (prio < XNSCHED_WEAK_MIN_PRIO ||
+		    prio > XNSCHED_WEAK_MAX_PRIO)
+			return NULL;
+		param->weak.prio = prio;
+		sched_class = &xnsched_class_weak;
+#else
+		if (prio)
+			return NULL;
+#endif
+		break;
+	case SCHED_RR:
+		/* If unspecified, keep the current quantum. */
+		tslice = u_ts2ns(&param_ex->sched_rr_quantum);
+		if (tslice == XN_INFINITE && tslice_r)
+			tslice = *tslice_r;
+		fallthrough;
+	case SCHED_FIFO:
+		if (prio < XNSCHED_FIFO_MIN_PRIO ||
+		    prio > XNSCHED_FIFO_MAX_PRIO)
+			return NULL;
+		break;
+	case SCHED_COBALT:
+		if (prio < XNSCHED_CORE_MIN_PRIO ||
+		    prio > XNSCHED_CORE_MAX_PRIO)
+			return NULL;
+		break;
+#ifdef CONFIG_XENO_OPT_SCHED_SPORADIC
+	case SCHED_SPORADIC:
+		param->pss.normal_prio = param_ex->sched_priority;
+		param->pss.low_prio = param_ex->sched_ss_low_priority;
+		param->pss.current_prio = param->pss.normal_prio;
+		param->pss.init_budget = u_ts2ns(&param_ex->sched_ss_init_budget);
+		param->pss.repl_period = u_ts2ns(&param_ex->sched_ss_repl_period);
+		param->pss.max_repl = param_ex->sched_ss_max_repl;
+		sched_class = &xnsched_class_sporadic;
+		break;
+#endif
+#ifdef CONFIG_XENO_OPT_SCHED_TP
+	case SCHED_TP:
+		param->tp.prio = param_ex->sched_priority;
+		param->tp.ptid = param_ex->sched_tp_partition;
+		sched_class = &xnsched_class_tp;
+		break;
+#endif
+#ifdef CONFIG_XENO_OPT_SCHED_QUOTA
+	case SCHED_QUOTA:
+		param->quota.prio = param_ex->sched_priority;
+		param->quota.tgid = param_ex->sched_quota_group;
+		sched_class = &xnsched_class_quota;
+		break;
+#endif
+	default:
+		return NULL;
+	}
+
+	if (tslice_r)
+		*tslice_r = tslice;
+
+	return sched_class;
+}
+
+COBALT_SYSCALL(sched_minprio, current, (int policy))
+{
+	int ret;
+
+	switch (policy) {
+	case SCHED_FIFO:
+	case SCHED_RR:
+	case SCHED_SPORADIC:
+	case SCHED_TP:
+	case SCHED_QUOTA:
+		ret = XNSCHED_FIFO_MIN_PRIO;
+		break;
+	case SCHED_COBALT:
+		ret = XNSCHED_CORE_MIN_PRIO;
+		break;
+	case SCHED_NORMAL:
+	case SCHED_WEAK:
+		ret = 0;
+		break;
+	default:
+		ret = -EINVAL;
+	}
+
+	trace_cobalt_sched_min_prio(policy, ret);
+
+	return ret;
+}
+
+COBALT_SYSCALL(sched_maxprio, current, (int policy))
+{
+	int ret;
+
+	switch (policy) {
+	case SCHED_FIFO:
+	case SCHED_RR:
+	case SCHED_SPORADIC:
+	case SCHED_TP:
+	case SCHED_QUOTA:
+		ret = XNSCHED_FIFO_MAX_PRIO;
+		break;
+	case SCHED_COBALT:
+		ret = XNSCHED_CORE_MAX_PRIO;
+		break;
+	case SCHED_NORMAL:
+		ret = 0;
+		break;
+	case SCHED_WEAK:
+#ifdef CONFIG_XENO_OPT_SCHED_WEAK
+		ret = XNSCHED_FIFO_MAX_PRIO;
+#else
+		ret = 0;
+#endif
+		break;
+	default:
+		ret = -EINVAL;
+	}
+
+	trace_cobalt_sched_max_prio(policy, ret);
+
+	return ret;
+}
+
+COBALT_SYSCALL(sched_yield, primary, (void))
+{
+	struct cobalt_thread *curr = cobalt_current_thread();
+	int ret = 0;
+
+	trace_cobalt_pthread_yield(0);
+
+	/* Maybe some extension wants to handle this. */
+	if (cobalt_call_extension(sched_yield, &curr->extref, ret) && ret)
+		return ret > 0 ? 0 : ret;
+
+	xnthread_resume(&curr->threadbase, 0);
+	if (xnsched_run())
+		return 0;
+
+	/*
+	 * If the round-robin move did not beget any context switch to
+	 * a thread running in primary mode, then wait for the next
+	 * linux context switch to happen.
+	 *
+	 * Rationale: it is most probably unexpected that
+	 * sched_yield() does not cause any context switch, since this
+	 * service is commonly used for implementing a poor man's
+	 * cooperative scheduling. By waiting for a context switch to
+	 * happen in the regular kernel, we guarantee that the CPU has
+	 * been relinquished for a while.
+	 *
+	 * Typically, this behavior allows a thread running in primary
+	 * mode to effectively yield the CPU to a thread of
+	 * same/higher priority stuck in secondary mode.
+	 *
+	 * NOTE: calling cobalt_yield() with no timeout
+	 * (i.e. XN_INFINITE) is probably never a good idea. This
+	 * means that a SCHED_FIFO non-rt thread stuck in a tight loop
+	 * would prevent the caller from waking up, since no
+	 * linux-originated schedule event would happen for unblocking
+	 * it on the current CPU. For this reason, we pass the
+	 * arbitrary TICK_NSEC value to limit the wait time to a
+	 * reasonable amount.
+	 */
+	return cobalt_yield(TICK_NSEC, TICK_NSEC);
+}
+
+#ifdef CONFIG_XENO_OPT_SCHED_TP
+
+static inline
+int set_tp_config(int cpu, union sched_config *config, size_t len)
+{
+	xnticks_t offset, duration, next_offset;
+	struct xnsched_tp_schedule *gps, *ogps;
+	struct xnsched_tp_window *w;
+	struct sched_tp_window *p;
+	struct xnsched *sched;
+	spl_t s;
+	int n;
+
+	if (len < sizeof(config->tp))
+		return -EINVAL;
+
+	sched = xnsched_struct(cpu);
+
+	switch (config->tp.op) {
+	case sched_tp_install:
+		if (config->tp.nr_windows > 0)
+			break;
+		fallthrough;
+	case sched_tp_uninstall:
+		gps = NULL;
+		goto set_schedule;
+	case sched_tp_start:
+		xnlock_get_irqsave(&nklock, s);
+		xnsched_tp_start_schedule(sched);
+		xnlock_put_irqrestore(&nklock, s);
+		return 0;
+	case sched_tp_stop:
+		xnlock_get_irqsave(&nklock, s);
+		xnsched_tp_stop_schedule(sched);
+		xnlock_put_irqrestore(&nklock, s);
+		return 0;
+	default:
+		return -EINVAL;
+	}
+
+	/* Install a new TP schedule on CPU. */
+
+	gps = xnmalloc(sizeof(*gps) + config->tp.nr_windows * sizeof(*w));
+	if (gps == NULL)
+		return -ENOMEM;
+
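+	/*
+	 * Illustrative example: offsets 0/10/15 ms with durations
+	 * 10/5/5 ms form a valid, contiguous 20 ms time frame; a
+	 * window may target ptid -1 to leave a scheduling hole.
+	 */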
+	for (n = 0, p = config->tp.windows, w = gps->pwins, next_offset = 0;
+	     n < config->tp.nr_windows; n++, p++, w++) {
+		/*
+		 * Time windows must be strictly contiguous. Holes may
+		 * be defined using windows assigned to the pseudo
+		 * partition #-1.
+		 */
+		offset = u_ts2ns(&p->offset);
+		if (offset != next_offset)
+			goto cleanup_and_fail;
+
+		duration = u_ts2ns(&p->duration);
+		if (duration <= 0)
+			goto cleanup_and_fail;
+
+		if (p->ptid < -1 ||
+		    p->ptid >= CONFIG_XENO_OPT_SCHED_TP_NRPART)
+			goto cleanup_and_fail;
+
+		w->w_offset = next_offset;
+		w->w_part = p->ptid;
+		next_offset += duration;
+	}
+
+	atomic_set(&gps->refcount, 1);
+	gps->pwin_nr = n;
+	gps->tf_duration = next_offset;
+set_schedule:
+	xnlock_get_irqsave(&nklock, s);
+	ogps = xnsched_tp_set_schedule(sched, gps);
+	xnlock_put_irqrestore(&nklock, s);
+
+	if (ogps)
+		xnsched_tp_put_schedule(ogps);
+
+	return 0;
+
+cleanup_and_fail:
+	xnfree(gps);
+
+	return -EINVAL;
+}
+
+static inline
+ssize_t get_tp_config(int cpu, void __user *u_config, size_t len,
+		      union sched_config *(*fetch_config)
+		      (int policy, const void __user *u_config,
+		       size_t *len),
+		      ssize_t (*put_config)(int policy, void __user *u_config,
+					    size_t u_len,
+					    const union sched_config *config,
+					    size_t len))
+{
+	struct xnsched_tp_window *pw, *w;
+	struct xnsched_tp_schedule *gps;
+	struct sched_tp_window *pp, *p;
+	union sched_config *config;
+	struct xnsched *sched;
+	ssize_t ret, elen;
+	spl_t s;
+	int n;
+
+	xnlock_get_irqsave(&nklock, s);
+
+	sched = xnsched_struct(cpu);
+	gps = xnsched_tp_get_schedule(sched);
+	if (gps == NULL) {
+		xnlock_put_irqrestore(&nklock, s);
+		return 0;
+	}
+
+	xnlock_put_irqrestore(&nklock, s);
+
+	elen = sched_tp_confsz(gps->pwin_nr);
+	config = xnmalloc(elen);
+	if (config == NULL) {
+		ret = -ENOMEM;
+		goto out;
+	}
+
+	config->tp.op = sched_tp_install;
+	config->tp.nr_windows = gps->pwin_nr;
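+	/*
+	 * Durations are not stored per window: rebuild each one from
+	 * the offset delta to the next window, and the last one from
+	 * the overall time frame duration.
+	 */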
+	for (n = 0, pp = p = config->tp.windows, pw = w = gps->pwins;
+	     n < gps->pwin_nr; pp = p, p++, pw = w, w++, n++) {
+		u_ns2ts(&p->offset, w->w_offset);
+		u_ns2ts(&pp->duration, w->w_offset - pw->w_offset);
+		p->ptid = w->w_part;
+	}
+	u_ns2ts(&pp->duration, gps->tf_duration - pw->w_offset);
+	ret = put_config(SCHED_TP, u_config, len, config, elen);
+	xnfree(config);
+out:
+	xnsched_tp_put_schedule(gps);
+
+	return ret;
+}
+
+#else /* !CONFIG_XENO_OPT_SCHED_TP */
+
+static inline int
+set_tp_config(int cpu, union sched_config *config, size_t len)
+{
+	return -EINVAL;
+}
+
+static inline ssize_t
+get_tp_config(int cpu, union sched_config __user *u_config, size_t len,
+	      union sched_config *(*fetch_config)
+	      (int policy, const void __user *u_config,
+	       size_t *len),
+	      ssize_t (*put_config)(int policy, void __user *u_config,
+				    size_t u_len,
+				    const union sched_config *config,
+				    size_t len))
+{
+	return -EINVAL;
+}
+
+#endif /* !CONFIG_XENO_OPT_SCHED_TP */
+
+#ifdef CONFIG_XENO_OPT_SCHED_QUOTA
+
+static inline
+int set_quota_config(int cpu, union sched_config *config, size_t len)
+{
+	struct __sched_config_quota *p = &config->quota;
+	struct __sched_quota_info *iq = &p->info;
+	struct cobalt_sched_group *group;
+	struct xnsched_quota_group *tg;
+	struct xnsched *sched;
+	int ret, quota_sum;
+	spl_t s;
+
+	if (len < sizeof(*p))
+		return -EINVAL;
+
+	switch (p->op) {
+	case sched_quota_add:
+		group = xnmalloc(sizeof(*group));
+		if (group == NULL)
+			return -ENOMEM;
+		tg = &group->quota;
+		group->pshared = p->add.pshared != 0;
+		group->scope = cobalt_current_resources(group->pshared);
+		xnlock_get_irqsave(&nklock, s);
+		sched = xnsched_struct(cpu);
+		ret = xnsched_quota_create_group(tg, sched, &quota_sum);
+		if (ret) {
+			xnlock_put_irqrestore(&nklock, s);
+			xnfree(group);
+			return ret;
+		}
+		list_add(&group->next, &group->scope->schedq);
+		xnlock_put_irqrestore(&nklock, s);
+		break;
+	case sched_quota_remove:
+	case sched_quota_force_remove:
+		xnlock_get_irqsave(&nklock, s);
+		sched = xnsched_struct(cpu);
+		tg = xnsched_quota_find_group(sched, p->remove.tgid);
+		if (tg == NULL)
+			goto bad_tgid;
+		group = container_of(tg, struct cobalt_sched_group, quota);
+		if (group->scope != cobalt_current_resources(group->pshared))
+			goto bad_tgid;
+		ret = xnsched_quota_destroy_group(tg,
+						  p->op == sched_quota_force_remove,
+						  &quota_sum);
+		if (ret) {
+			xnlock_put_irqrestore(&nklock, s);
+			return ret;
+		}
+		list_del(&group->next);
+		xnlock_put_irqrestore(&nklock, s);
+		iq->tgid = tg->tgid;
+		iq->quota = tg->quota_percent;
+		iq->quota_peak = tg->quota_peak_percent;
+		iq->quota_sum = quota_sum;
+		xnfree(group);
+		return 0;
+	case sched_quota_set:
+		xnlock_get_irqsave(&nklock, s);
+		sched = xnsched_struct(cpu);
+		tg = xnsched_quota_find_group(sched, p->set.tgid);
+		if (tg == NULL)
+			goto bad_tgid;
+		group = container_of(tg, struct cobalt_sched_group, quota);
+		if (group->scope != cobalt_current_resources(group->pshared))
+			goto bad_tgid;
+		xnsched_quota_set_limit(tg, p->set.quota, p->set.quota_peak,
+					&quota_sum);
+		xnlock_put_irqrestore(&nklock, s);
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	iq->tgid = tg->tgid;
+	iq->quota = tg->quota_percent;
+	iq->quota_peak = tg->quota_peak_percent;
+	iq->quota_sum = quota_sum;
+
+	return 0;
+bad_tgid:
+	xnlock_put_irqrestore(&nklock, s);
+
+	return -ESRCH;
+}
+
+static inline
+ssize_t get_quota_config(int cpu, void __user *u_config, size_t len,
+			 union sched_config *(*fetch_config)
+			 (int policy, const void __user *u_config,
+			  size_t *len),
+			 ssize_t (*put_config)(int policy, void __user *u_config,
+					       size_t u_len,
+					       const union sched_config *config,
+					       size_t len))
+{
+	struct cobalt_sched_group *group;
+	struct xnsched_quota_group *tg;
+	union sched_config *config;
+	struct xnsched *sched;
+	ssize_t ret;
+	spl_t s;
+
+	config = fetch_config(SCHED_QUOTA, u_config, &len);
+	if (IS_ERR(config))
+		return PTR_ERR(config);
+
+	xnlock_get_irqsave(&nklock, s);
+	sched = xnsched_struct(cpu);
+	tg = xnsched_quota_find_group(sched, config->quota.get.tgid);
+	if (tg == NULL)
+		goto bad_tgid;
+
+	group = container_of(tg, struct cobalt_sched_group, quota);
+	if (group->scope != cobalt_current_resources(group->pshared))
+		goto bad_tgid;
+
+	config->quota.info.tgid = tg->tgid;
+	config->quota.info.quota = tg->quota_percent;
+	config->quota.info.quota_peak = tg->quota_peak_percent;
+	config->quota.info.quota_sum = xnsched_quota_sum_all(sched);
+	xnlock_put_irqrestore(&nklock, s);
+
+	ret = put_config(SCHED_QUOTA, u_config, len, config, sizeof(*config));
+	xnfree(config);
+
+	return ret;
+bad_tgid:
+	xnlock_put_irqrestore(&nklock, s);
+	xnfree(config);
+
+	return -ESRCH;
+}
+
+#else /* !CONFIG_XENO_OPT_SCHED_QUOTA */
+
+static inline
+int set_quota_config(int cpu, union sched_config *config, size_t len)
+{
+	return -EINVAL;
+}
+
+static inline
+ssize_t get_quota_config(int cpu, void __user *u_config,
+			 size_t len,
+			 union sched_config *(*fetch_config)
+			 (int policy, const void __user *u_config,
+			  size_t *len),
+			 ssize_t (*put_config)(int policy, void __user *u_config,
+					       size_t u_len,
+					       const union sched_config *config,
+					       size_t len))
+{
+	return -EINVAL;
+}
+
+#endif /* !CONFIG_XENO_OPT_SCHED_QUOTA */
+
+static union sched_config *
+sched_fetch_config(int policy, const void __user *u_config, size_t *len)
+{
+	union sched_config *buf;
+	int ret;
+
+	if (u_config == NULL)
+		return ERR_PTR(-EFAULT);
+
+	if (policy == SCHED_QUOTA && *len < sizeof(buf->quota))
+		return ERR_PTR(-EINVAL);
+
+	buf = xnmalloc(*len);
+	if (buf == NULL)
+		return ERR_PTR(-ENOMEM);
+
+	ret = cobalt_copy_from_user(buf, u_config, *len);
+	if (ret) {
+		xnfree(buf);
+		return ERR_PTR(ret);
+	}
+
+	return buf;
+}
+
+static int sched_ack_config(int policy, const union sched_config *config,
+			    void __user *u_config)
+{
+	union sched_config __user *u_p = u_config;
+
+	if (policy != SCHED_QUOTA)
+		return 0;
+
+	return u_p == NULL ? -EFAULT :
+		cobalt_copy_to_user(&u_p->quota.info, &config->quota.info,
+				       sizeof(u_p->quota.info));
+}
+
+static ssize_t sched_put_config(int policy,
+				void __user *u_config, size_t u_len,
+				const union sched_config *config, size_t len)
+{
+	union sched_config *u_p = u_config;
+
+	if (u_config == NULL)
+		return -EFAULT;
+
+	if (policy == SCHED_QUOTA) {
+		if (u_len < sizeof(config->quota))
+			return -EINVAL;
+		return cobalt_copy_to_user(&u_p->quota.info, &config->quota.info,
+					      sizeof(u_p->quota.info)) ?:
+			sizeof(u_p->quota.info);
+	}
+
+	return cobalt_copy_to_user(u_config, config, len) ?: len;
+}
+
+int __cobalt_sched_setconfig_np(int cpu, int policy,
+				void __user *u_config,
+				size_t len,
+				union sched_config *(*fetch_config)
+				(int policy, const void __user *u_config,
+				 size_t *len),
+				int (*ack_config)(int policy,
+						  const union sched_config *config,
+						  void __user *u_config))
+{
+	union sched_config *buf;
+	int ret;
+
+	trace_cobalt_sched_setconfig(cpu, policy, len);
+
+	if (cpu < 0 || cpu >= NR_CPUS || !xnsched_threading_cpu(cpu))
+		return -EINVAL;
+
+	if (len == 0)
+		return -EINVAL;
+
+	buf = fetch_config(policy, u_config, &len);
+	if (IS_ERR(buf))
+		return PTR_ERR(buf);
+
+	switch (policy)	{
+	case SCHED_TP:
+		ret = set_tp_config(cpu, buf, len);
+		break;
+	case SCHED_QUOTA:
+		ret = set_quota_config(cpu, buf, len);
+		break;
+	default:
+		ret = -EINVAL;
+	}
+
+	if (ret == 0)
+		ret = ack_config(policy, buf, u_config);
+
+	xnfree(buf);
+
+	return ret;
+}
+
+COBALT_SYSCALL(sched_setconfig_np, conforming,
+	       (int cpu, int policy,
+		union sched_config __user *u_config,
+		size_t len))
+{
+	return __cobalt_sched_setconfig_np(cpu, policy, u_config, len,
+					   sched_fetch_config, sched_ack_config);
+}
+
+ssize_t __cobalt_sched_getconfig_np(int cpu, int policy,
+				    void __user *u_config,
+				    size_t len,
+				    union sched_config *(*fetch_config)
+				    (int policy, const void __user *u_config,
+				     size_t *len),
+				    ssize_t (*put_config)(int policy,
+							  void __user *u_config,
+							  size_t u_len,
+							  const union sched_config *config,
+							  size_t len))
+{
+	ssize_t ret;
+
+	switch (policy)	{
+	case SCHED_TP:
+		ret = get_tp_config(cpu, u_config, len,
+				    fetch_config, put_config);
+		break;
+	case SCHED_QUOTA:
+		ret = get_quota_config(cpu, u_config, len,
+				       fetch_config, put_config);
+		break;
+	default:
+		ret = -EINVAL;
+	}
+
+	trace_cobalt_sched_get_config(cpu, policy, ret);
+
+	return ret;
+}
+
+COBALT_SYSCALL(sched_getconfig_np, conforming,
+	       (int cpu, int policy,
+		union sched_config __user *u_config,
+		size_t len))
+{
+	return __cobalt_sched_getconfig_np(cpu, policy, u_config, len,
+					   sched_fetch_config, sched_put_config);
+}
+
+int __cobalt_sched_weightprio(int policy,
+			      const struct sched_param_ex *param_ex)
+{
+	struct xnsched_class *sched_class;
+	union xnsched_policy_param param;
+	int prio;
+
+	sched_class = cobalt_sched_policy_param(&param, policy,
+						param_ex, NULL);
+	if (sched_class == NULL)
+		return -EINVAL;
+
+	prio = param_ex->sched_priority;
+	if (prio < 0)
+		prio = -prio;
+
+	return prio + sched_class->weight;
+}
+
+COBALT_SYSCALL(sched_weightprio, current,
+	       (int policy, const struct sched_param_ex __user *u_param))
+{
+	struct sched_param_ex param_ex;
+
+	if (cobalt_copy_from_user(&param_ex, u_param, sizeof(param_ex)))
+		return -EFAULT;
+
+	return __cobalt_sched_weightprio(policy, &param_ex);
+}
+
+int cobalt_sched_setscheduler_ex(pid_t pid,
+				 int policy,
+				 const struct sched_param_ex *param_ex,
+				 __u32 __user *u_winoff,
+				 int __user *u_promoted)
+{
+	struct cobalt_local_hkey hkey;
+	struct cobalt_thread *thread;
+	int ret, promoted = 0;
+	spl_t s;
+
+	trace_cobalt_sched_setscheduler(pid, policy, param_ex);
+
+	if (pid) {
+		xnlock_get_irqsave(&nklock, s);
+		thread = cobalt_thread_find(pid);
+		xnlock_put_irqrestore(&nklock, s);
+	} else
+		thread = cobalt_current_thread();
+
+	if (thread == NULL) {
+		if (u_winoff == NULL || pid != task_pid_vnr(current))
+			return -ESRCH;
+
+		thread = cobalt_thread_shadow(&hkey, u_winoff);
+		if (IS_ERR(thread))
+			return PTR_ERR(thread);
+
+		promoted = 1;
+	}
+
+	ret = __cobalt_thread_setschedparam_ex(thread, policy, param_ex);
+	if (ret)
+		return ret;
+
+	return cobalt_copy_to_user(u_promoted, &promoted, sizeof(promoted));
+}
+
+COBALT_SYSCALL(sched_setscheduler_ex, conforming,
+	       (pid_t pid,
+		int policy,
+		const struct sched_param_ex __user *u_param,
+		__u32 __user *u_winoff,
+		int __user *u_promoted))
+{
+	struct sched_param_ex param_ex;
+
+	if (cobalt_copy_from_user(&param_ex, u_param, sizeof(param_ex)))
+		return -EFAULT;
+
+	return cobalt_sched_setscheduler_ex(pid, policy, &param_ex,
+					    u_winoff, u_promoted);
+}
+
+int cobalt_sched_getscheduler_ex(pid_t pid,
+				 int *policy_r,
+				 struct sched_param_ex *param_ex)
+{
+	struct cobalt_thread *thread;
+	spl_t s;
+
+	trace_cobalt_sched_getscheduler(pid);
+
+	if (pid) {
+		xnlock_get_irqsave(&nklock, s);
+		thread = cobalt_thread_find(pid);
+		xnlock_put_irqrestore(&nklock, s);
+	} else
+		thread = cobalt_current_thread();
+
+	if (thread == NULL)
+		return -ESRCH;
+
+	return __cobalt_thread_getschedparam_ex(thread, policy_r, param_ex);
+}
+
+COBALT_SYSCALL(sched_getscheduler_ex, current,
+	       (pid_t pid,
+		int __user *u_policy,
+		struct sched_param_ex __user *u_param))
+{
+	struct sched_param_ex param_ex;
+	int ret, policy;
+
+	ret = cobalt_sched_getscheduler_ex(pid, &policy, &param_ex);
+	if (ret)
+		return ret;
+
+	if (cobalt_copy_to_user(u_param, &param_ex, sizeof(param_ex)) ||
+	    cobalt_copy_to_user(u_policy, &policy, sizeof(policy)))
+		return -EFAULT;
+
+	return 0;
+}
+
+void cobalt_sched_reclaim(struct cobalt_process *process)
+{
+	struct cobalt_resources *p = &process->resources;
+	struct cobalt_sched_group *group;
+	spl_t s;
+
+	xnlock_get_irqsave(&nklock, s);
+
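+	/*
+	 * The lock is dropped across each xnfree() call, then re-taken
+	 * before looking at the next group.
+	 */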
+	while (!list_empty(&p->schedq)) {
+		group = list_get_entry(&p->schedq, struct cobalt_sched_group, next);
+#ifdef CONFIG_XENO_OPT_SCHED_QUOTA
+		xnsched_quota_destroy_group(&group->quota, 1, NULL);
+#endif
+		xnlock_put_irqrestore(&nklock, s);
+		xnfree(group);
+		xnlock_get_irqsave(&nklock, s);
+	}
+
+	xnlock_put_irqrestore(&nklock, s);
+}
+++ linux-patched/kernel/xenomai/posix/sem.h	2022-03-21 12:58:29.064892131 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/kernel/xenomai/posix/corectl.h	1970-01-01 01:00:00.000000000 +0100
+/*
+ * Written by Gilles Chanteperdrix <gilles.chanteperdrix@xenomai.org>.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+#ifndef _COBALT_POSIX_SEM_H
+#define _COBALT_POSIX_SEM_H
+
+#include <linux/kernel.h>
+#include <linux/fcntl.h>
+#include <cobalt/kernel/thread.h>
+#include <cobalt/kernel/registry.h>
+#include <xenomai/posix/syscall.h>
+#include <xenomai/posix/process.h>
+
+struct cobalt_process;
+struct filename;
+
+struct cobalt_sem {
+	unsigned int magic;
+	struct xnsynch synchbase;
+	struct cobalt_sem_state *state;
+	int flags;
+	unsigned int refs;
+	struct filename *pathname;
+	struct cobalt_resnode resnode;
+};
+
+/* Copied from Linuxthreads semaphore.h. */
+struct _sem_fastlock
+{
+  long int __status;
+  int __spinlock;
+};
+
+typedef struct
+{
+  struct _sem_fastlock __sem_lock;
+  int __sem_value;
+  long __sem_waiting;
+} sem_t;
+
+#include <cobalt/uapi/sem.h>
+
+#define SEM_VALUE_MAX	(INT_MAX)
+#define SEM_FAILED	NULL
+#define SEM_NAMED	0x80000000
+
+struct cobalt_sem_shadow __user *
+__cobalt_sem_open(struct cobalt_sem_shadow __user *usm,
+		  const char __user *u_name,
+		  int oflags, mode_t mode, unsigned int value);
+
+int __cobalt_sem_timedwait(struct cobalt_sem_shadow __user *u_sem,
+			   const struct timespec64 *ts);
+
+int __cobalt_sem_timedwait64(struct cobalt_sem_shadow __user *u_sem,
+			     const struct __kernel_timespec __user *u_ts);
+
+int __cobalt_sem_destroy(xnhandle_t handle);
+
+void cobalt_nsem_reclaim(struct cobalt_process *process);
+
+struct cobalt_sem *
+__cobalt_sem_init(const char *name, struct cobalt_sem_shadow *sem,
+		  int flags, unsigned value);
+
+void __cobalt_sem_shadow_init(struct cobalt_sem *sem, __u32 magic,
+			      struct cobalt_sem_shadow *sm);
+
+COBALT_SYSCALL_DECL(sem_init,
+		    (struct cobalt_sem_shadow __user *u_sem,
+		     int flags, unsigned value));
+
+COBALT_SYSCALL_DECL(sem_post,
+		    (struct cobalt_sem_shadow __user *u_sem));
+
+COBALT_SYSCALL_DECL(sem_wait,
+		    (struct cobalt_sem_shadow __user *u_sem));
+
+COBALT_SYSCALL_DECL(sem_timedwait,
+		    (struct cobalt_sem_shadow __user *u_sem,
+		     const struct __user_old_timespec __user *u_ts));
+
+COBALT_SYSCALL_DECL(sem_timedwait64,
+		    (struct cobalt_sem_shadow __user *u_sem,
+		     const struct __kernel_timespec __user *u_ts));
+
+COBALT_SYSCALL_DECL(sem_trywait,
+		    (struct cobalt_sem_shadow __user *u_sem));
+
+COBALT_SYSCALL_DECL(sem_getvalue,
+		    (struct cobalt_sem_shadow __user *u_sem,
+		     int __user *u_sval));
+
+COBALT_SYSCALL_DECL(sem_destroy,
+		    (struct cobalt_sem_shadow __user *u_sem));
+
+COBALT_SYSCALL_DECL(sem_open,
+		    (struct cobalt_sem_shadow __user *__user *u_addrp,
+		     const char __user *u_name,
+		     int oflags, mode_t mode, unsigned int value));
+
+COBALT_SYSCALL_DECL(sem_close,
+		    (struct cobalt_sem_shadow __user *usm));
+
+COBALT_SYSCALL_DECL(sem_unlink, (const char __user *u_name));
+
+COBALT_SYSCALL_DECL(sem_broadcast_np,
+		    (struct cobalt_sem_shadow __user *u_sem));
+
+COBALT_SYSCALL_DECL(sem_inquire,
+		    (struct cobalt_sem_shadow __user *u_sem,
+		     struct cobalt_sem_info __user *u_info,
+		     pid_t __user *u_waitlist,
+		     size_t waitsz));
+
+void cobalt_sem_reclaim(struct cobalt_resnode *node,
+			spl_t s);
+
+#endif /* !_COBALT_POSIX_SEM_H */
+++ linux-patched/kernel/xenomai/posix/corectl.h	2022-03-21 12:58:29.060892170 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/kernel/xenomai/posix/mqueue.c	1970-01-01 01:00:00.000000000 +0100
+/*
+ * Copyright (C) 2016 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+#ifndef _COBALT_POSIX_CORECTL_H
+#define _COBALT_POSIX_CORECTL_H
+
+#include <linux/types.h>
+#include <linux/notifier.h>
+#include <xenomai/posix/syscall.h>
+#include <cobalt/uapi/corectl.h>
+
+struct cobalt_config_vector {
+	void __user *u_buf;
+	size_t u_bufsz;
+};
+
+COBALT_SYSCALL_DECL(corectl,
+		    (int request, void __user *u_buf, size_t u_bufsz));
+
+void cobalt_add_config_chain(struct notifier_block *nb);
+
+void cobalt_remove_config_chain(struct notifier_block *nb);
+
+#endif /* !_COBALT_POSIX_CORECTL_H */
+++ linux-patched/kernel/xenomai/posix/mqueue.c	2022-03-21 12:58:29.057892199 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/kernel/xenomai/posix/signal.c	1970-01-01 01:00:00.000000000 +0100
+/*
+ * Written by Gilles Chanteperdrix <gilles.chanteperdrix@xenomai.org>.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+#include <linux/stdarg.h>
+#include <linux/slab.h>
+#include <linux/mm.h>
+#include <linux/sched.h>
+#include <cobalt/kernel/select.h>
+#include <rtdm/fd.h>
+#include "internal.h"
+#include "thread.h"
+#include "signal.h"
+#include "timer.h"
+#include "mqueue.h"
+#include "clock.h"
+#include <trace/events/cobalt-posix.h>
+#include <cobalt/kernel/time.h>
+
+#define COBALT_MSGMAX		65536
+#define COBALT_MSGSIZEMAX	(16*1024*1024)
+#define COBALT_MSGPRIOMAX	32768
+
+struct cobalt_mq {
+	unsigned magic;
+
+	struct list_head link;
+
+	struct xnsynch receivers;
+	struct xnsynch senders;
+	size_t memsize;
+	char *mem;
+	struct list_head queued;
+	struct list_head avail;
+	int nrqueued;
+
+	/* mq_notify */
+	struct siginfo si;
+	mqd_t target_qd;
+	struct cobalt_thread *target;
+
+	struct mq_attr attr;
+
+	unsigned refs;
+	char name[COBALT_MAXNAME];
+	xnhandle_t handle;
+
+	DECLARE_XNSELECT(read_select);
+	DECLARE_XNSELECT(write_select);
+};
+
+struct cobalt_mqd {
+	struct cobalt_mq *mq;
+	struct rtdm_fd fd;
+};
+
+struct cobalt_msg {
+	struct list_head link;
+	unsigned int prio;
+	size_t len;
+	char data[0];
+};
+
+struct cobalt_mqwait_context {
+	struct xnthread_wait_context wc;
+	struct cobalt_msg *msg;
+};
+
+static struct mq_attr default_attr = {
+      .mq_maxmsg = 10,
+      .mq_msgsize = 8192,
+};
+
+static LIST_HEAD(cobalt_mqq);
+
+#ifdef CONFIG_XENO_OPT_VFILE
+
+static int mq_vfile_show(struct xnvfile_regular_iterator *it, void *data)
+{
+	return 0;
+}
+
+static struct xnvfile_regular_ops mq_vfile_ops = {
+	.show = mq_vfile_show,
+};
+
+static struct xnpnode_regular __mq_pnode = {
+	.node = {
+		.dirname = "mqueue",
+		.root = &posix_ptree,
+		.ops = &xnregistry_vfreg_ops,
+	},
+	.vfile = {
+		.ops = &mq_vfile_ops,
+	},
+};
+
+#else /* !CONFIG_XENO_OPT_VFILE */
+
+static struct xnpnode_link __mq_pnode = {
+	.node = {
+		.dirname = "mqueue",
+	}
+};
+
+#endif /* !CONFIG_XENO_OPT_VFILE */
+
+static inline struct cobalt_msg *mq_msg_alloc(struct cobalt_mq *mq)
+{
+	if (list_empty(&mq->avail))
+		return NULL;
+
+	return list_get_entry(&mq->avail, struct cobalt_msg, link);
+}
+
+static inline void mq_msg_free(struct cobalt_mq *mq, struct cobalt_msg * msg)
+{
+	list_add(&msg->link, &mq->avail); /* For earliest re-use of the block. */
+}
+
+static inline int mq_init(struct cobalt_mq *mq, const struct mq_attr *attr)
+{
+	unsigned i, msgsize, memsize;
+	char *mem;
+
+	if (attr == NULL)
+		attr = &default_attr;
+	else {
+		if (attr->mq_maxmsg <= 0 || attr->mq_msgsize <= 0)
+			return -EINVAL;
+		if (attr->mq_maxmsg > COBALT_MSGMAX)
+			return -EINVAL;
+		if (attr->mq_msgsize > COBALT_MSGSIZEMAX)
+			return -EINVAL;
+	}
+
+	msgsize = attr->mq_msgsize + sizeof(struct cobalt_msg);
+
+	/* Align msgsize on natural boundary. */
+	if ((msgsize % sizeof(unsigned long)))
+		msgsize +=
+		    sizeof(unsigned long) - (msgsize % sizeof(unsigned long));
+
+	memsize = msgsize * attr->mq_maxmsg;
+	memsize = PAGE_ALIGN(memsize);
+	if (get_order(memsize) > MAX_ORDER)
+		return -ENOSPC;
+
+	mem = xnheap_vmalloc(memsize);
+	if (mem == NULL)
+		return -ENOSPC;
+
+	mq->memsize = memsize;
+	INIT_LIST_HEAD(&mq->queued);
+	mq->nrqueued = 0;
+	xnsynch_init(&mq->receivers, XNSYNCH_PRIO, NULL);
+	xnsynch_init(&mq->senders, XNSYNCH_PRIO, NULL);
+	mq->mem = mem;
+
+	/* Fill the pool. */
+	INIT_LIST_HEAD(&mq->avail);
+	for (i = 0; i < attr->mq_maxmsg; i++) {
+		struct cobalt_msg *msg = (struct cobalt_msg *) (mem + i * msgsize);
+		mq_msg_free(mq, msg);
+	}
+
+	mq->attr = *attr;
+	mq->target = NULL;
+	xnselect_init(&mq->read_select);
+	xnselect_init(&mq->write_select);
+	mq->magic = COBALT_MQ_MAGIC;
+	mq->refs = 2;
+	INIT_LIST_HEAD(&mq->link);
+
+	return 0;
+}
+
+static inline void mq_destroy(struct cobalt_mq *mq)
+{
+	spl_t s;
+
+	xnlock_get_irqsave(&nklock, s);
+	xnsynch_destroy(&mq->receivers);
+	xnsynch_destroy(&mq->senders);
+	list_del(&mq->link);
+	xnsched_run();
+	xnlock_put_irqrestore(&nklock, s);
+	xnselect_destroy(&mq->read_select); /* Reschedules. */
+	xnselect_destroy(&mq->write_select); /* Ditto. */
+	xnregistry_remove(mq->handle);
+	xnheap_vfree(mq->mem);
+	kfree(mq);
+}
+
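+/*
+ * Drop one reference. The nklock is held by the caller on entry and
+ * is always released here; the queue is torn down once the last
+ * reference is gone.
+ */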
+static int mq_unref_inner(struct cobalt_mq *mq, spl_t s)
+{
+	int destroy;
+
+	destroy = --mq->refs == 0;
+	xnlock_put_irqrestore(&nklock, s);
+
+	if (destroy)
+		mq_destroy(mq);
+
+	return destroy;
+}
+
+static int mq_unref(struct cobalt_mq *mq)
+{
+	spl_t s;
+
+	xnlock_get_irqsave(&nklock, s);
+	return mq_unref_inner(mq, s);
+}
+
+static void mqd_close(struct rtdm_fd *fd)
+{
+	struct cobalt_mqd *mqd = container_of(fd, struct cobalt_mqd, fd);
+	struct cobalt_mq *mq = mqd->mq;
+
+	kfree(mqd);
+	mq_unref(mq);
+}
+
+int
+mqd_select(struct rtdm_fd *fd, struct xnselector *selector,
+	   unsigned type, unsigned index)
+{
+	struct cobalt_mqd *mqd = container_of(fd, struct cobalt_mqd, fd);
+	struct xnselect_binding *binding;
+	struct cobalt_mq *mq;
+	int err;
+	spl_t s;
+
+	if (type == XNSELECT_READ || type == XNSELECT_WRITE) {
+		binding = xnmalloc(sizeof(*binding));
+		if (!binding)
+			return -ENOMEM;
+	} else
+		return -EBADF;
+
+	xnlock_get_irqsave(&nklock, s);
+	mq = mqd->mq;
+
+	switch(type) {
+	case XNSELECT_READ:
+		err = -EBADF;
+		if ((rtdm_fd_flags(fd) & COBALT_PERMS_MASK) == O_WRONLY)
+			goto unlock_and_error;
+
+		err = xnselect_bind(&mq->read_select, binding,
+				selector, type, index,
+				!list_empty(&mq->queued));
+		if (err)
+			goto unlock_and_error;
+		break;
+
+	case XNSELECT_WRITE:
+		err = -EBADF;
+		if ((rtdm_fd_flags(fd) & COBALT_PERMS_MASK) == O_RDONLY)
+			goto unlock_and_error;
+
+		err = xnselect_bind(&mq->write_select, binding,
+				selector, type, index,
+				!list_empty(&mq->avail));
+		if (err)
+			goto unlock_and_error;
+		break;
+	}
+	xnlock_put_irqrestore(&nklock, s);
+	return 0;
+
+      unlock_and_error:
+	xnlock_put_irqrestore(&nklock, s);
+	xnfree(binding);
+	return err;
+}
+
+static struct rtdm_fd_ops mqd_ops = {
+	.close = mqd_close,
+	.select = mqd_select,
+};
+
+static inline int mqd_create(struct cobalt_mq *mq, unsigned long flags, int ufd)
+{
+	struct cobalt_mqd *mqd;
+	int ret;
+
+	if (cobalt_ppd_get(0) == &cobalt_kernel_ppd)
+		return -EPERM;
+
+	mqd = kmalloc(sizeof(*mqd), GFP_KERNEL);
+	if (mqd == NULL)
+		return -ENOSPC;
+
+	mqd->fd.oflags = flags;
+	mqd->mq = mq;
+
+	ret = rtdm_fd_enter(&mqd->fd, ufd, COBALT_MQD_MAGIC, &mqd_ops);
+	if (ret < 0)
+		return ret;
+
+	return rtdm_fd_register(&mqd->fd, ufd);
+}
+
+static int mq_open(int uqd, const char *name, int oflags,
+		   int mode, struct mq_attr *attr)
+{
+	struct cobalt_mq *mq;
+	xnhandle_t handle;
+	spl_t s;
+	int err;
+
+	if (name[0] != '/' || name[1] == '\0')
+		return -EINVAL;
+
+  retry_bind:
+	err = xnregistry_bind(&name[1], XN_NONBLOCK, XN_RELATIVE, &handle);
+	switch (err) {
+	case 0:
+		/* Found */
+		if ((oflags & (O_CREAT | O_EXCL)) == (O_CREAT | O_EXCL))
+			return -EEXIST;
+
+		xnlock_get_irqsave(&nklock, s);
+		mq = xnregistry_lookup(handle, NULL);
+		if (mq && mq->magic != COBALT_MQ_MAGIC) {
+			xnlock_put_irqrestore(&nklock, s);
+			return -EINVAL;
+		}
+
+		if (mq) {
+			++mq->refs;
+			xnlock_put_irqrestore(&nklock, s);
+		} else {
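+			/* Raced with removal; bind the name again. */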
+			xnlock_put_irqrestore(&nklock, s);
+			goto retry_bind;
+		}
+
+		err = mqd_create(mq, oflags & (O_NONBLOCK | COBALT_PERMS_MASK),
+				uqd);
+		if (err < 0) {
+			mq_unref(mq);
+			return err;
+		}
+		break;
+
+	case -EWOULDBLOCK:
+		/* Not found */
+		if ((oflags & O_CREAT) == 0)
+			return (mqd_t)-ENOENT;
+
+		mq = kmalloc(sizeof(*mq), GFP_KERNEL);
+		if (mq == NULL)
+			return -ENOSPC;
+
+		err = mq_init(mq, attr);
+		if (err) {
+			kfree(mq);
+			return err;
+		}
+
+		snprintf(mq->name, sizeof(mq->name), "%s", &name[1]);
+
+		err = mqd_create(mq, oflags & (O_NONBLOCK | COBALT_PERMS_MASK),
+				uqd);
+		if (err < 0) {
+			mq_destroy(mq);
+			return err;
+		}
+
+		xnlock_get_irqsave(&nklock, s);
+		err = xnregistry_enter(mq->name, mq, &mq->handle,
+				       &__mq_pnode.node);
+		if (err < 0)
+			--mq->refs;
+		else
+			list_add_tail(&mq->link, &cobalt_mqq);
+		xnlock_put_irqrestore(&nklock, s);
+		if (err < 0) {
+			rtdm_fd_close(uqd, COBALT_MQD_MAGIC);
+			if (err == -EEXIST)
+				goto retry_bind;
+			return err;
+		}
+		break;
+
+	default:
+		return err;
+	}
+
+	return 0;
+}
+
+static inline int mq_close(mqd_t fd)
+{
+	int err;
+
+	err = rtdm_fd_close(fd, COBALT_MQD_MAGIC);
+	return err == -EADV ? -EBADF : err;
+}
+
+static inline int mq_unlink(const char *name)
+{
+	struct cobalt_mq *mq;
+	xnhandle_t handle;
+	spl_t s;
+	int err;
+
+	if (name[0] != '/' || name[1] == '\0')
+		return -EINVAL;
+
+	err = xnregistry_bind(&name[1], XN_NONBLOCK, XN_RELATIVE, &handle);
+	if (err == -EWOULDBLOCK)
+		return -ENOENT;
+	if (err)
+		return err;
+
+	xnlock_get_irqsave(&nklock, s);
+	mq = xnregistry_lookup(handle, NULL);
+	if (!mq) {
+		err = -ENOENT;
+		goto err_unlock;
+	}
+	if (mq->magic != COBALT_MQ_MAGIC) {
+		err = -EINVAL;
+	  err_unlock:
+		xnlock_put_irqrestore(&nklock, s);
+
+		return err;
+	}
+	if (mq_unref_inner(mq, s) == 0)
+		xnregistry_unlink(&name[1]);
+	return 0;
+}
+
+static inline struct cobalt_msg *
+mq_trysend(struct cobalt_mqd *mqd, size_t len)
+{
+	struct cobalt_msg *msg;
+	struct cobalt_mq *mq;
+	unsigned flags;
+
+	mq = mqd->mq;
+	flags = rtdm_fd_flags(&mqd->fd) & COBALT_PERMS_MASK;
+
+	if (flags != O_WRONLY && flags != O_RDWR)
+		return ERR_PTR(-EBADF);
+
+	if (len > mq->attr.mq_msgsize)
+		return ERR_PTR(-EMSGSIZE);
+
+	msg = mq_msg_alloc(mq);
+	if (msg == NULL)
+		return ERR_PTR(-EAGAIN);
+
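+	/* Last free slot just taken: queue no longer writable for select(). */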
+	if (list_empty(&mq->avail))
+		xnselect_signal(&mq->write_select, 0);
+
+	return msg;
+}
+
+static inline struct cobalt_msg *
+mq_tryrcv(struct cobalt_mqd *mqd, size_t len)
+{
+	struct cobalt_msg *msg;
+	unsigned int flags;
+	struct cobalt_mq *mq;
+
+	mq = mqd->mq;
+	flags = rtdm_fd_flags(&mqd->fd) & COBALT_PERMS_MASK;
+
+	if (flags != O_RDONLY && flags != O_RDWR)
+		return ERR_PTR(-EBADF);
+
+	if (len < mq->attr.mq_msgsize)
+		return ERR_PTR(-EMSGSIZE);
+
+	if (list_empty(&mq->queued))
+		return ERR_PTR(-EAGAIN);
+
+	msg = list_get_entry(&mq->queued, struct cobalt_msg, link);
+	mq->nrqueued--;
+
+	if (list_empty(&mq->queued))
+		xnselect_signal(&mq->read_select, 0);
+
+	return msg;
+}
+
+static struct cobalt_msg *
+mq_timedsend_inner(struct cobalt_mqd *mqd,
+		   size_t len, const void __user *u_ts,
+		   int (*fetch_timeout)(struct timespec64 *ts,
+					const void __user *u_ts))
+{
+	struct cobalt_mqwait_context mwc;
+	struct cobalt_msg *msg;
+	struct cobalt_mq *mq;
+	struct timespec64 ts;
+	xntmode_t tmode;
+	xnticks_t to;
+	spl_t s;
+	int ret;
+
+	to = XN_INFINITE;
+	tmode = XN_RELATIVE;
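+	/*
+	 * Fast path first: if the queue is full and the caller passed
+	 * a timeout, drop the lock to fetch it from userland, then
+	 * retry once before sleeping on the senders queue.
+	 */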
+redo:
+	xnlock_get_irqsave(&nklock, s);
+	msg = mq_trysend(mqd, len);
+	if (msg != ERR_PTR(-EAGAIN))
+		goto out;
+
+	if (rtdm_fd_flags(&mqd->fd) & O_NONBLOCK)
+		goto out;
+
+	if (fetch_timeout) {
+		xnlock_put_irqrestore(&nklock, s);
+		ret = fetch_timeout(&ts, u_ts);
+		if (ret)
+			return ERR_PTR(ret);
+		if (!timespec64_valid(&ts))
+			return ERR_PTR(-EINVAL);
+		to = ts2ns(&ts) + 1;
+		tmode = XN_REALTIME;
+		fetch_timeout = NULL;
+		goto redo;
+	}
+
+	mq = mqd->mq;
+	xnthread_prepare_wait(&mwc.wc);
+	ret = xnsynch_sleep_on(&mq->senders, to, tmode);
+	if (ret) {
+		if (ret & XNBREAK)
+			msg = ERR_PTR(-EINTR);
+		else if (ret & XNTIMEO)
+			msg = ERR_PTR(-ETIMEDOUT);
+		else if (ret & XNRMID)
+			msg = ERR_PTR(-EBADF);
+	} else
+		msg = mwc.msg;
+out:
+	xnlock_put_irqrestore(&nklock, s);
+
+	return msg;
+}
+
+static void mq_release_msg(struct cobalt_mq *mq, struct cobalt_msg *msg)
+{
+	struct cobalt_mqwait_context *mwc;
+	struct xnthread_wait_context *wc;
+	struct xnthread *thread;
+
+	/*
+	 * Try passing the free message slot to a waiting sender, link
+	 * it to the free queue otherwise.
+	 */
+	if (xnsynch_pended_p(&mq->senders)) {
+		thread = xnsynch_wakeup_one_sleeper(&mq->senders);
+		wc = xnthread_get_wait_context(thread);
+		mwc = container_of(wc, struct cobalt_mqwait_context, wc);
+		mwc->msg = msg;
+		xnthread_complete_wait(wc);
+	} else {
+		mq_msg_free(mq, msg);
+		if (list_is_singular(&mq->avail))
+			xnselect_signal(&mq->write_select, 1);
+	}
+}
+
+static int
+mq_finish_send(struct cobalt_mqd *mqd, struct cobalt_msg *msg)
+{
+	struct cobalt_mqwait_context *mwc;
+	struct xnthread_wait_context *wc;
+	struct cobalt_sigpending *sigp;
+	struct xnthread *thread;
+	struct cobalt_mq *mq;
+	spl_t s;
+
+	mq = mqd->mq;
+
+	xnlock_get_irqsave(&nklock, s);
+	/* Can we do pipelined sending? */
+	if (xnsynch_pended_p(&mq->receivers)) {
+		thread = xnsynch_wakeup_one_sleeper(&mq->receivers);
+		wc = xnthread_get_wait_context(thread);
+		mwc = container_of(wc, struct cobalt_mqwait_context, wc);
+		mwc->msg = msg;
+		xnthread_complete_wait(wc);
+	} else {
+		/* Nope, have to go through the queue. */
+		list_add_priff(msg, &mq->queued, prio, link);
+		mq->nrqueued++;
+
+		/*
+		 * If first message and no pending reader, send a
+		 * signal if notification was enabled via mq_notify().
+		 */
+		if (list_is_singular(&mq->queued)) {
+			xnselect_signal(&mq->read_select, 1);
+			if (mq->target) {
+				sigp = cobalt_signal_alloc();
+				if (sigp) {
+					cobalt_copy_siginfo(SI_MESGQ, &sigp->si, &mq->si);
+					if (cobalt_signal_send(mq->target, sigp, 0) <= 0)
+						cobalt_signal_free(sigp);
+				}
+				mq->target = NULL;
+			}
+		}
+	}
+	xnsched_run();
+	xnlock_put_irqrestore(&nklock, s);
+
+	return 0;
+}
+
+static struct cobalt_msg *
+mq_timedrcv_inner(struct cobalt_mqd *mqd,
+		  size_t len,
+		  const void __user *u_ts,
+		  int (*fetch_timeout)(struct timespec64 *ts,
+				       const void __user *u_ts))
+{
+	struct cobalt_mqwait_context mwc;
+	struct cobalt_msg *msg;
+	struct cobalt_mq *mq;
+	struct timespec64 ts;
+	xntmode_t tmode;
+	xnticks_t to;
+	spl_t s;
+	int ret;
+
+	to = XN_INFINITE;
+	tmode = XN_RELATIVE;
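+	/* Same two-pass scheme as the send path, waiting on the receivers queue. */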
+redo:
+	xnlock_get_irqsave(&nklock, s);
+	msg = mq_tryrcv(mqd, len);
+	if (msg != ERR_PTR(-EAGAIN))
+		goto out;
+
+	if (rtdm_fd_flags(&mqd->fd) & O_NONBLOCK)
+		goto out;
+
+	if (fetch_timeout) {
+		xnlock_put_irqrestore(&nklock, s);
+		ret = fetch_timeout(&ts, u_ts);
+		if (ret)
+			return ERR_PTR(ret);
+		if (!timespec64_valid(&ts))
+			return ERR_PTR(-EINVAL);
+		to = ts2ns(&ts) + 1;
+		tmode = XN_REALTIME;
+		fetch_timeout = NULL;
+		goto redo;
+	}
+
+	mq = mqd->mq;
+	xnthread_prepare_wait(&mwc.wc);
+	ret = xnsynch_sleep_on(&mq->receivers, to, tmode);
+	if (ret == 0)
+		msg = mwc.msg;
+	else if (ret & XNRMID)
+		msg = ERR_PTR(-EBADF);
+	else if (ret & XNTIMEO)
+		msg = ERR_PTR(-ETIMEDOUT);
+	else
+		msg = ERR_PTR(-EINTR);
+out:
+	xnlock_put_irqrestore(&nklock, s);
+
+	return msg;
+}
+
+static int
+mq_finish_rcv(struct cobalt_mqd *mqd, struct cobalt_msg *msg)
+{
+	spl_t s;
+
+	xnlock_get_irqsave(&nklock, s);
+	mq_release_msg(mqd->mq, msg);
+	xnsched_run();
+	xnlock_put_irqrestore(&nklock, s);
+
+	return 0;
+}
+
+static inline int mq_getattr(struct cobalt_mqd *mqd, struct mq_attr *attr)
+{
+	struct cobalt_mq *mq;
+	spl_t s;
+
+	mq = mqd->mq;
+	*attr = mq->attr;
+	xnlock_get_irqsave(&nklock, s);
+	attr->mq_flags = rtdm_fd_flags(&mqd->fd);
+	attr->mq_curmsgs = mq->nrqueued;
+	xnlock_put_irqrestore(&nklock, s);
+
+	return 0;
+}
+
+static inline int
+mq_notify(struct cobalt_mqd *mqd, unsigned index, const struct sigevent *evp)
+{
+	struct cobalt_thread *thread = cobalt_current_thread();
+	struct cobalt_mq *mq;
+	int err;
+	spl_t s;
+
+	if (evp && ((evp->sigev_notify != SIGEV_SIGNAL &&
+		     evp->sigev_notify != SIGEV_NONE) ||
+		    (unsigned int)(evp->sigev_signo - 1) > SIGRTMAX - 1))
+		return -EINVAL;
+
+	if (xnsched_interrupt_p() || thread == NULL)
+		return -EPERM;
+
+	xnlock_get_irqsave(&nklock, s);
+	mq = mqd->mq;
+	if (mq->target && mq->target != thread) {
+		err = -EBUSY;
+		goto unlock_and_error;
+	}
+
+	if (evp == NULL || evp->sigev_notify == SIGEV_NONE)
+		/* Here, mq->target == cobalt_current_thread() or NULL. */
+		mq->target = NULL;
+	else {
+		mq->target = thread;
+		mq->target_qd = index;
+		mq->si.si_signo = evp->sigev_signo;
+		mq->si.si_errno = 0;
+		mq->si.si_code = SI_MESGQ;
+		mq->si.si_value = evp->sigev_value;
+		/*
+		 * XXX: we differ from the regular kernel here, which
+		 * passes the sender's pid/uid data into the
+		 * receiver's namespaces. We pass the receiver's creds
+		 * into the init namespace instead.
+		 */
+		mq->si.si_pid = task_pid_nr(current);
+		mq->si.si_uid = get_current_uuid();
+	}
+
+	xnlock_put_irqrestore(&nklock, s);
+	return 0;
+
+      unlock_and_error:
+	xnlock_put_irqrestore(&nklock, s);
+	return err;
+}
+
+static inline struct cobalt_mqd *cobalt_mqd_get(mqd_t ufd)
+{
+	struct rtdm_fd *fd;
+
+	fd = rtdm_fd_get(ufd, COBALT_MQD_MAGIC);
+	if (IS_ERR(fd)) {
+		int err = PTR_ERR(fd);
+		if (err == -EADV)
+			err = cobalt_current_process() ? -EBADF : -EPERM;
+		return ERR_PTR(err);
+	}
+
+	return container_of(fd, struct cobalt_mqd, fd);
+}
+
+static inline void cobalt_mqd_put(struct cobalt_mqd *mqd)
+{
+	rtdm_fd_put(&mqd->fd);
+}
+
+int __cobalt_mq_notify(mqd_t fd, const struct sigevent *evp)
+{
+	struct cobalt_mqd *mqd;
+	int ret;
+
+	mqd = cobalt_mqd_get(fd);
+	if (IS_ERR(mqd))
+		ret = PTR_ERR(mqd);
+	else {
+		trace_cobalt_mq_notify(fd, evp);
+		ret = mq_notify(mqd, fd, evp);
+		cobalt_mqd_put(mqd);
+	}
+
+	return ret;
+}
+
+COBALT_SYSCALL(mq_notify, primary,
+	       (mqd_t fd, const struct sigevent *__user evp))
+{
+	struct sigevent sev;
+
+	if (evp && cobalt_copy_from_user(&sev, evp, sizeof(sev)))
+		return -EFAULT;
+
+	return __cobalt_mq_notify(fd, evp ? &sev : NULL);
+}
+
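+/*
+ * An anonymous RTDM file descriptor is reserved first, so that the
+ * queue can be bound to a known descriptor value; it is dropped again
+ * if the actual open request fails.
+ */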
+int __cobalt_mq_open(const char __user *u_name, int oflags,
+		     mode_t mode, struct mq_attr *attr)
+{
+	char name[COBALT_MAXNAME];
+	ssize_t len;
+	mqd_t uqd;
+	int ret;
+
+	len = cobalt_strncpy_from_user(name, u_name, sizeof(name));
+	if (len < 0)
+		return -EFAULT;
+
+	if (len >= sizeof(name))
+		return -ENAMETOOLONG;
+
+	if (len == 0)
+		return -EINVAL;
+
+	trace_cobalt_mq_open(name, oflags, mode);
+
+	uqd = __rtdm_anon_getfd("[cobalt-mq]", oflags);
+	if (uqd < 0)
+		return uqd;
+
+	ret = mq_open(uqd, name, oflags, mode, attr);
+	if (ret < 0) {
+		__rtdm_anon_putfd(uqd);
+		return ret;
+	}
+
+	return uqd;
+}
+
+COBALT_SYSCALL(mq_open, lostage,
+	       (const char __user *u_name, int oflags,
+		mode_t mode, struct mq_attr __user *u_attr))
+{
+	struct mq_attr _attr, *attr = &_attr;
+
+	if ((oflags & O_CREAT) && u_attr) {
+		if (cobalt_copy_from_user(&_attr, u_attr, sizeof(_attr)))
+			return -EFAULT;
+	} else
+		attr = NULL;
+
+	return __cobalt_mq_open(u_name, oflags, mode, attr);
+}
+
+COBALT_SYSCALL(mq_close, lostage, (mqd_t uqd))
+{
+	trace_cobalt_mq_close(uqd);
+
+	return mq_close(uqd);
+}
+
+COBALT_SYSCALL(mq_unlink, lostage, (const char __user *u_name))
+{
+	char name[COBALT_MAXNAME];
+	ssize_t len;
+
+	len = cobalt_strncpy_from_user(name, u_name, sizeof(name));
+	if (len < 0)
+		return -EFAULT;
+	if (len >= sizeof(name))
+		return -ENAMETOOLONG;
+
+	trace_cobalt_mq_unlink(name);
+
+	return mq_unlink(name);
+}
+
+int __cobalt_mq_getattr(mqd_t uqd, struct mq_attr *attr)
+{
+	struct cobalt_mqd *mqd;
+	int ret;
+
+	mqd = cobalt_mqd_get(uqd);
+	if (IS_ERR(mqd))
+		return PTR_ERR(mqd);
+
+	ret = mq_getattr(mqd, attr);
+	cobalt_mqd_put(mqd);
+	if (ret)
+		return ret;
+
+	trace_cobalt_mq_getattr(uqd, attr);
+
+	return 0;
+}
+
+COBALT_SYSCALL(mq_getattr, current,
+	       (mqd_t uqd, struct mq_attr __user *u_attr))
+{
+	struct mq_attr attr;
+	int ret;
+
+	ret = __cobalt_mq_getattr(uqd, &attr);
+	if (ret)
+		return ret;
+
+	return cobalt_copy_to_user(u_attr, &attr, sizeof(attr));
+}
+
+static inline int mq_fetch_timeout(struct timespec64 *ts,
+				   const void __user *u_ts)
+{
+	return u_ts == NULL ? -EFAULT : cobalt_get_u_timespec(ts, u_ts);
+}
+
+static inline int mq_fetch_timeout64(struct timespec64 *ts,
+				     const void __user *u_ts)
+{
+	return u_ts == NULL ? -EFAULT : cobalt_get_timespec64(ts, u_ts);
+}
+
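+/*
+ * Common send path shared by the old and y2038-safe timespec ABIs:
+ * the optional fetch_timeout handler tells us how to read the
+ * timeout from userland, and is NULL when no timeout was passed.
+ */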
+int __cobalt_mq_timedsend(mqd_t uqd, const void __user *u_buf, size_t len,
+			  unsigned int prio, const void __user *u_ts,
+			  int (*fetch_timeout)(struct timespec64 *ts,
+					       const void __user *u_ts))
+{
+	struct cobalt_msg *msg;
+	struct cobalt_mqd *mqd;
+	int ret;
+
+	mqd = cobalt_mqd_get(uqd);
+	if (IS_ERR(mqd))
+		return PTR_ERR(mqd);
+
+	if (prio >= COBALT_MSGPRIOMAX) {
+		ret = -EINVAL;
+		goto out;
+	}
+
+	if (len > 0 && !access_rok(u_buf, len)) {
+		ret = -EFAULT;
+		goto out;
+	}
+
+	trace_cobalt_mq_send(uqd, u_buf, len, prio);
+	msg = mq_timedsend_inner(mqd, len, u_ts, fetch_timeout);
+	if (IS_ERR(msg)) {
+		ret = PTR_ERR(msg);
+		goto out;
+	}
+
+	ret = cobalt_copy_from_user(msg->data, u_buf, len);
+	if (ret) {
+		mq_finish_rcv(mqd, msg);
+		goto out;
+	}
+	msg->len = len;
+	msg->prio = prio;
+	ret = mq_finish_send(mqd, msg);
+out:
+	cobalt_mqd_put(mqd);
+
+	return ret;
+}
+
+int __cobalt_mq_timedsend64(mqd_t uqd, const void __user *u_buf, size_t len,
+			    unsigned int prio, const void __user *u_ts)
+{
+	return __cobalt_mq_timedsend(uqd, u_buf, len, prio, u_ts,
+				     u_ts ? mq_fetch_timeout64 : NULL);
+}
+
+COBALT_SYSCALL(mq_timedsend, primary,
+	       (mqd_t uqd, const void __user *u_buf, size_t len,
+		unsigned int prio, const struct __user_old_timespec __user *u_ts))
+{
+	return __cobalt_mq_timedsend(uqd, u_buf, len, prio,
+				     u_ts, u_ts ? mq_fetch_timeout : NULL);
+}
+
+COBALT_SYSCALL(mq_timedsend64, primary,
+	       (mqd_t uqd, const void __user *u_buf, size_t len,
+		unsigned int prio, const struct __kernel_timespec __user *u_ts))
+{
+	return __cobalt_mq_timedsend64(uqd, u_buf, len, prio, u_ts);
+}
+
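+/*
+ * Receive path counterpart: the payload and priority are read while
+ * the message buffer is still held, the buffer is then returned to
+ * the queue pool, and the priority is posted to userland last.
+ */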
+int __cobalt_mq_timedreceive(mqd_t uqd, void __user *u_buf,
+			     ssize_t *lenp,
+			     unsigned int __user *u_prio,
+			     const void __user *u_ts,
+			     int (*fetch_timeout)(struct timespec64 *ts,
+						  const void __user *u_ts))
+{
+	struct cobalt_mqd *mqd;
+	struct cobalt_msg *msg;
+	unsigned int prio;
+	int ret;
+
+	mqd = cobalt_mqd_get(uqd);
+	if (IS_ERR(mqd))
+		return PTR_ERR(mqd);
+
+	if (*lenp > 0 && !access_wok(u_buf, *lenp)) {
+		ret = -EFAULT;
+		goto fail;
+	}
+
+	msg = mq_timedrcv_inner(mqd, *lenp, u_ts, fetch_timeout);
+	if (IS_ERR(msg)) {
+		ret = PTR_ERR(msg);
+		goto fail;
+	}
+
+	ret = cobalt_copy_to_user(u_buf, msg->data, msg->len);
+	if (ret) {
+		mq_finish_rcv(mqd, msg);
+		goto fail;
+	}
+
+	*lenp = msg->len;
+	prio = msg->prio;
+	ret = mq_finish_rcv(mqd, msg);
+	if (ret)
+		goto fail;
+
+	cobalt_mqd_put(mqd);
+
+	if (u_prio && __xn_put_user(prio, u_prio))
+		return -EFAULT;
+
+	return 0;
+fail:
+	cobalt_mqd_put(mqd);
+
+	return ret;
+}
+
+int __cobalt_mq_timedreceive64(mqd_t uqd, void __user *u_buf,
+			       ssize_t __user *u_len,
+			       unsigned int __user *u_prio,
+			       const void __user *u_ts)
+{
+	ssize_t len;
+	int ret;
+
+	ret = cobalt_copy_from_user(&len, u_len, sizeof(len));
+	if (ret)
+		return ret;
+
+	ret = __cobalt_mq_timedreceive(uqd, u_buf, &len, u_prio, u_ts,
+				       u_ts ? mq_fetch_timeout64 : NULL);
+
+	return ret ?: cobalt_copy_to_user(u_len, &len, sizeof(*u_len));
+}
+
+COBALT_SYSCALL(mq_timedreceive, primary,
+	       (mqd_t uqd, void __user *u_buf,
+		ssize_t __user *u_len,
+		unsigned int __user *u_prio,
+		const struct __user_old_timespec __user *u_ts))
+{
+	ssize_t len;
+	int ret;
+
+	ret = cobalt_copy_from_user(&len, u_len, sizeof(len));
+	if (ret)
+		return ret;
+
+	ret = __cobalt_mq_timedreceive(uqd, u_buf, &len, u_prio,
+				       u_ts, u_ts ? mq_fetch_timeout : NULL);
+
+	return ret ?: cobalt_copy_to_user(u_len, &len, sizeof(*u_len));
+}
+
+COBALT_SYSCALL(mq_timedreceive64, primary,
+	       (mqd_t uqd, void __user *u_buf, ssize_t __user *u_len,
+		unsigned int __user *u_prio,
+		const struct __kernel_timespec __user *u_ts))
+{
+	return __cobalt_mq_timedreceive64(uqd, u_buf, u_len, u_prio, u_ts);
+}
+++ linux-patched/kernel/xenomai/posix/signal.c	2022-03-21 12:58:29.054892228 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/kernel/xenomai/posix/io.c	1970-01-01 01:00:00.000000000 +0100
+/*
+ * Copyright (C) 2013 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+#include <linux/sched.h>
+#include <cobalt/kernel/assert.h>
+#include <cobalt/kernel/compat.h>
+#include <cobalt/kernel/time.h>
+#include "internal.h"
+#include "signal.h"
+#include "thread.h"
+#include "timer.h"
+#include "clock.h"
+
+static void *sigpending_mem;
+
+static LIST_HEAD(sigpending_pool);
+
+/*
+ * Maximum number of signal notifications which may be pending at any
+ * given time, timers excluded. Cobalt signals are always
+ * thread-directed, and we assume that in practice, each signal number
+ * is processed by a dedicated thread. We provide for up to three
+ * real-time signal events to pile up, and a single notification
+ * pending for other signals. Timers use a fast queuing logic
+ * maintaining a count of overruns, and therefore do not consume any
+ * memory from this pool.
+ */
+#define __SIGPOOL_SIZE  (sizeof(struct cobalt_sigpending) *	\
+			 (_NSIG + (SIGRTMAX - SIGRTMIN) * 2))
+
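+/*
+ * Returns a non-zero value if the signal could be delivered
+ * immediately to a waiter, zero if the caller should queue it
+ * instead.
+ */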
+static int cobalt_signal_deliver(struct cobalt_thread *thread,
+				 struct cobalt_sigpending *sigp,
+				 int group)
+{				/* nklocked, IRQs off */
+	struct cobalt_sigwait_context *swc;
+	struct xnthread_wait_context *wc;
+	struct list_head *sigwaiters;
+	int sig, ret;
+
+	sig = sigp->si.si_signo;
+	XENO_BUG_ON(COBALT, sig < 1 || sig > _NSIG);
+
+	/*
+	 * Attempt to deliver the signal immediately to the initial
+	 * target that waits for it.
+	 */
+	if (xnsynch_pended_p(&thread->sigwait)) {
+		wc = xnthread_get_wait_context(&thread->threadbase);
+		swc = container_of(wc, struct cobalt_sigwait_context, wc);
+		if (sigismember(swc->set, sig))
+			goto deliver;
+	}
+
+	/*
+	 * If that does not work out and we are sending to a thread
+	 * group, try to deliver to any thread from the same process
+	 * waiting for that signal.
+	 */
+	sigwaiters = &thread->process->sigwaiters;
+	if (!group || list_empty(sigwaiters))
+		return 0;
+
+	list_for_each_entry(thread, sigwaiters, signext) {
+		wc = xnthread_get_wait_context(&thread->threadbase);
+		swc = container_of(wc, struct cobalt_sigwait_context, wc);
+		if (sigismember(swc->set, sig))
+			goto deliver;
+	}
+
+	return 0;
+deliver:
+	cobalt_copy_siginfo(sigp->si.si_code, swc->si, &sigp->si);
+	cobalt_call_extension(signal_deliver, &thread->extref,
+			      ret, swc->si, sigp);
+	xnthread_complete_wait(&swc->wc);
+	xnsynch_wakeup_one_sleeper(&thread->sigwait);
+	list_del(&thread->signext);
+
+	/*
+	 * This is an immediate delivery bypassing any queuing, so we
+	 * have to release the sigpending data right away before
+	 * leaving.
+	 */
+	cobalt_signal_free(sigp);
+
+	return 1;
+}
+
+int cobalt_signal_send(struct cobalt_thread *thread,
+		       struct cobalt_sigpending *sigp,
+		       int group)
+{				/* nklocked, IRQs off */
+	struct list_head *sigq;
+	int sig, ret;
+
+	/* Can we deliver this signal immediately? */
+	ret = cobalt_signal_deliver(thread, sigp, group);
+	if (ret)
+		return ret;	/* Yep, done. */
+
+	/*
+	 * Nope, attempt to queue it. We start by calling any Cobalt
+	 * extension for queuing the signal first.
+	 */
+	if (cobalt_call_extension(signal_queue, &thread->extref, ret, sigp)) {
+		if (ret)
+			/* Queuing done remotely or error. */
+			return ret;
+	}
+
+	sig = sigp->si.si_signo;
+	sigq = thread->sigqueues + sig - 1;
+	if (!list_empty(sigq)) {
+		/* Queue non-rt signals only once. */
+		if (sig < SIGRTMIN)
+			return 0;
+		/* Queue rt signal source only once (SI_TIMER). */
+		if (!list_empty(&sigp->next))
+			return 0;
+	}
+
+	sigaddset(&thread->sigpending, sig);
+	list_add_tail(&sigp->next, sigq);
+
+	return 1;
+}
+EXPORT_SYMBOL_GPL(cobalt_signal_send);
+
+int cobalt_signal_send_pid(pid_t pid, struct cobalt_sigpending *sigp)
+{				/* nklocked, IRQs off */
+	struct cobalt_thread *thread;
+
+	thread = cobalt_thread_find(pid);
+	if (thread)
+		return cobalt_signal_send(thread, sigp, 0);
+
+	return -ESRCH;
+}
+EXPORT_SYMBOL_GPL(cobalt_signal_send_pid);
+
+struct cobalt_sigpending *cobalt_signal_alloc(void)
+{				/* nklocked, IRQs off */
+	struct cobalt_sigpending *sigp;
+
+	if (list_empty(&sigpending_pool)) {
+		if (xnclock_ratelimit())
+			printk(XENO_WARNING "signal bucket pool underflows\n");
+		return NULL;
+	}
+
+	sigp = list_get_entry(&sigpending_pool, struct cobalt_sigpending, next);
+	INIT_LIST_HEAD(&sigp->next);
+
+	return sigp;
+}
+EXPORT_SYMBOL_GPL(cobalt_signal_alloc);
+
+void cobalt_signal_free(struct cobalt_sigpending *sigp)
+{				/* nklocked, IRQs off */
+	if ((void *)sigp >= sigpending_mem &&
+	    (void *)sigp < sigpending_mem + __SIGPOOL_SIZE)
+		list_add_tail(&sigp->next, &sigpending_pool);
+}
+EXPORT_SYMBOL_GPL(cobalt_signal_free);
+
+void cobalt_signal_flush(struct cobalt_thread *thread)
+{
+	struct cobalt_sigpending *sigp, *tmp;
+	struct list_head *sigq;
+	spl_t s;
+	int n;
+
+	/*
+	 * TCB is not accessible from userland anymore, no locking
+	 * required.
+	 */
+	if (sigisemptyset(&thread->sigpending))
+		return;
+
+	for (n = 0; n < _NSIG; n++) {
+		sigq = thread->sigqueues + n;
+		if (list_empty(sigq))
+			continue;
+		/*
+		 * sigpending blocks must be unlinked so that we
+		 * detect this fact when deleting their respective
+		 * owners.
+		 */
+		list_for_each_entry_safe(sigp, tmp, sigq, next) {
+			list_del_init(&sigp->next);
+			if ((void *)sigp >= sigpending_mem &&
+			    (void *)sigp < sigpending_mem + __SIGPOOL_SIZE) {
+				xnlock_get_irqsave(&nklock, s);
+				list_add_tail(&sigp->next, &sigpending_pool);
+				xnlock_put_irqrestore(&nklock, s);
+			}
+		}
+	}
+
+	sigemptyset(&thread->sigpending);
+}
+
+static int signal_put_siginfo(void __user *u_si, const struct siginfo *si,
+			      int overrun)
+{
+	struct siginfo __user *u_p = u_si;
+	int ret;
+
+	ret = __xn_put_user(si->si_signo, &u_p->si_signo);
+	ret |= __xn_put_user(si->si_errno, &u_p->si_errno);
+	ret |= __xn_put_user(si->si_code, &u_p->si_code);
+
+	/*
+	 * Copy the generic/standard siginfo bits to userland.
+	 */
+	switch (si->si_code) {
+	case SI_TIMER:
+		ret |= __xn_put_user(si->si_tid, &u_p->si_tid);
+		ret |= __xn_put_user(si->si_ptr, &u_p->si_ptr);
+		ret |= __xn_put_user(overrun, &u_p->si_overrun);
+		break;
+	case SI_QUEUE:
+	case SI_MESGQ:
+		ret |= __xn_put_user(si->si_ptr, &u_p->si_ptr);
+		fallthrough;
+	case SI_USER:
+		ret |= __xn_put_user(si->si_pid, &u_p->si_pid);
+		ret |= __xn_put_user(si->si_uid, &u_p->si_uid);
+	}
+
+	return ret;
+}
+
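+/*
+ * Common backend for sigwait(), sigwaitinfo() and sigtimedwait():
+ * pick the lowest-numbered pending signal matching the set if any,
+ * otherwise sleep on the per-thread sigwait object until a matching
+ * signal is sent, the timeout elapses, or the wait is broken.
+ * XN_NONBLOCK turns the call into a simple poll.
+ */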
+static int signal_wait(sigset_t *set, xnticks_t timeout,
+		       void __user *u_si, bool compat)
+{
+	struct cobalt_sigpending *sigp = NULL;
+	struct cobalt_sigwait_context swc;
+	struct cobalt_thread *curr;
+	int ret, sig, n, overrun;
+	unsigned long *p, *t, m;
+	struct siginfo si, *sip;
+	struct list_head *sigq;
+	spl_t s;
+
+	curr = cobalt_current_thread();
+	XENO_BUG_ON(COBALT, curr == NULL);
+
+	if (u_si && !access_wok(u_si, sizeof(*u_si)))
+		return -EFAULT;
+
+	xnlock_get_irqsave(&nklock, s);
+
+check:
+	if (sigisemptyset(&curr->sigpending))
+		/* Most common/fast path. */
+		goto wait;
+
+	p = curr->sigpending.sig; /* pending */
+	t = set->sig;		  /* tested */
+
+	for (n = 0, sig = 0; n < _NSIG_WORDS; ++n) {
+		m = *p++ & *t++;
+		if (m == 0)
+			continue;
+		sig = ffz(~m) + n * _NSIG_BPW + 1;
+		break;
+	}
+
+	if (sig) {
+		sigq = curr->sigqueues + sig - 1;
+		if (list_empty(sigq)) {
+			sigdelset(&curr->sigpending, sig);
+			goto check;
+		}
+		sigp = list_get_entry(sigq, struct cobalt_sigpending, next);
+		INIT_LIST_HEAD(&sigp->next); /* Mark sigp as unlinked. */
+		if (list_empty(sigq))
+			sigdelset(&curr->sigpending, sig);
+		sip = &sigp->si;
+		ret = 0;
+		goto done;
+	}
+
+wait:
+	if (timeout == XN_NONBLOCK) {
+		ret = -EAGAIN;
+		goto fail;
+	}
+	swc.set = set;
+	swc.si = &si;
+	xnthread_prepare_wait(&swc.wc);
+	list_add_tail(&curr->signext, &curr->process->sigwaiters);
+	ret = xnsynch_sleep_on(&curr->sigwait, timeout, XN_RELATIVE);
+	if (ret) {
+		list_del(&curr->signext);
+		ret = ret & XNBREAK ? -EINTR : -EAGAIN;
+		goto fail;
+	}
+	sig = si.si_signo;
+	sip = &si;
+done:
+	/*
+	 * si_overrun raises a nasty issue: we have to collect and
+	 * clear it atomically before we drop the lock, although we
+	 * don't know in advance whether any extension would use it
+	 * along with the additional si_codes it may provide, and we
+	 * must drop the lock before running the signal_copyinfo
+	 * handler.
+	 *
+	 * Observing that si_overrun is likely the only "unstable"
+	 * piece of the signal information which might change under
+	 * our feet while we copy the bits to userland, we collect it
+	 * here from the atomic section for all unknown si_codes, then
+	 * pass its value to the signal_copyinfo handler.
+	 */
+	switch (sip->si_code) {
+	case SI_TIMER:
+		overrun = cobalt_timer_deliver(curr, sip->si_tid);
+		break;
+	case SI_USER:
+	case SI_MESGQ:
+	case SI_QUEUE:
+		overrun = 0;
+		break;
+	default:
+		overrun = sip->si_overrun;
+		if (overrun)
+			sip->si_overrun = 0;
+	}
+
+	xnlock_put_irqrestore(&nklock, s);
+
+	if (u_si == NULL)
+		goto out;	/* Return signo only. */
+
+#ifdef CONFIG_XENO_ARCH_SYS3264
+	if (compat) {
+		ret = sys32_put_siginfo(u_si, sip, overrun);
+		if (!ret)
+			/* Allow an extended target to receive more data. */
+			cobalt_call_extension(signal_copyinfo_compat,
+					      &curr->extref, ret, u_si, sip,
+					      overrun);
+	} else
+#endif
+	{
+		ret = signal_put_siginfo(u_si, sip, overrun);
+		if (!ret)
+			/* Allow an extended target to receive more data. */
+			cobalt_call_extension(signal_copyinfo, &curr->extref,
+					      ret, u_si, sip, overrun);
+	}
+
+out:
+	/*
+	 * If we pulled the signal information from a sigpending
+	 * block, release it to the free pool if applicable.
+	 */
+	if (sigp &&
+	    (void *)sigp >= sigpending_mem &&
+	    (void *)sigp < sigpending_mem + __SIGPOOL_SIZE) {
+		xnlock_get_irqsave(&nklock, s);
+		list_add_tail(&sigp->next, &sigpending_pool);
+		xnlock_put_irqrestore(&nklock, s);
+		/* no more ref. to sigp beyond this point. */
+	}
+
+	return ret ? -EFAULT : sig;
+fail:
+	xnlock_put_irqrestore(&nklock, s);
+
+	return ret;
+}
+
+int __cobalt_sigwait(sigset_t *set)
+{
+	return signal_wait(set, XN_INFINITE, NULL, false);
+}
+
+COBALT_SYSCALL(sigwait, primary,
+	       (const sigset_t __user *u_set, int __user *u_sig))
+{
+	sigset_t set;
+	int sig;
+
+	if (cobalt_copy_from_user(&set, u_set, sizeof(set)))
+		return -EFAULT;
+
+	sig = signal_wait(&set, XN_INFINITE, NULL, false);
+	if (sig < 0)
+		return sig;
+
+	return cobalt_copy_to_user(u_sig, &sig, sizeof(*u_sig));
+}
+
+int __cobalt_sigtimedwait(sigset_t *set,
+			  const struct timespec64 *timeout,
+			  void __user *u_si,
+			  bool compat)
+{
+	xnticks_t ticks;
+
+	if (!timespec64_valid(timeout))
+		return -EINVAL;
+	ticks = ts2ns(timeout);
+	if (ticks++ == 0)
+		ticks = XN_NONBLOCK;
+
+	return signal_wait(set, ticks, u_si, compat);
+}
+
+COBALT_SYSCALL(sigtimedwait, nonrestartable,
+	       (const sigset_t __user *u_set,
+		struct siginfo __user *u_si,
+		const struct __user_old_timespec __user *u_timeout))
+{
+	struct timespec64 timeout;
+	sigset_t set;
+
+	if (cobalt_copy_from_user(&set, u_set, sizeof(set)))
+		return -EFAULT;
+
+	if (cobalt_copy_from_user(&timeout, u_timeout, sizeof(timeout)))
+		return -EFAULT;
+
+	return __cobalt_sigtimedwait(&set, &timeout, u_si, false);
+}
+
+COBALT_SYSCALL(sigtimedwait64, nonrestartable,
+	       (const sigset_t __user *u_set,
+		struct siginfo __user *u_si,
+		const struct __kernel_timespec __user *u_timeout))
+{
+	struct timespec64 timeout;
+	sigset_t set;
+
+	if (cobalt_copy_from_user(&set, u_set, sizeof(set)))
+		return -EFAULT;
+
+	if (cobalt_get_timespec64(&timeout, u_timeout))
+		return -EFAULT;
+
+	return __cobalt_sigtimedwait(&set, &timeout, u_si, false);
+}
+
+int __cobalt_sigwaitinfo(sigset_t *set,
+			 void __user *u_si,
+			 bool compat)
+{
+	return signal_wait(set, XN_INFINITE, u_si, compat);
+}
+
+COBALT_SYSCALL(sigwaitinfo, nonrestartable,
+	       (const sigset_t __user *u_set, struct siginfo __user *u_si))
+{
+	sigset_t set;
+
+	if (cobalt_copy_from_user(&set, u_set, sizeof(set)))
+		return -EFAULT;
+
+	return __cobalt_sigwaitinfo(&set, u_si, false);
+}
+
+COBALT_SYSCALL(sigpending, primary, (old_sigset_t __user *u_set))
+{
+	struct cobalt_thread *curr = cobalt_current_thread();
+
+	return cobalt_copy_to_user(u_set, &curr->sigpending, sizeof(*u_set));
+}
+
+int __cobalt_kill(struct cobalt_thread *thread, int sig, int group) /* nklocked, IRQs off */
+{
+	struct cobalt_sigpending *sigp;
+	int ret = 0;
+
+	/*
+	 * We have undocumented pseudo-signals to suspend/resume/unblock
+	 * threads, force them out of primary mode or even demote them
+	 * to the weak scheduling class/priority. Process them early,
+	 * before anyone can notice...
+	 */
+	switch (sig) {
+	case 0:
+		/* Check for existence only. */
+		break;
+	case SIGSUSP:
+		/*
+		 * All callers shall be tagged as conforming calls, so
+		 * self-directed suspension can only happen from
+		 * primary mode. Yummie.
+		 */
+		xnthread_suspend(&thread->threadbase, XNSUSP,
+				 XN_INFINITE, XN_RELATIVE, NULL);
+		if (&thread->threadbase == xnthread_current() &&
+		    xnthread_test_info(&thread->threadbase, XNBREAK))
+			ret = -EINTR;
+		break;
+	case SIGRESM:
+		xnthread_resume(&thread->threadbase, XNSUSP);
+		goto resched;
+	case SIGRELS:
+		xnthread_unblock(&thread->threadbase);
+		goto resched;
+	case SIGKICK:
+		xnthread_kick(&thread->threadbase);
+		goto resched;
+	case SIGDEMT:
+		xnthread_demote(&thread->threadbase);
+		goto resched;
+	case 1 ... _NSIG:
+		sigp = cobalt_signal_alloc();
+		if (sigp) {
+			sigp->si.si_signo = sig;
+			sigp->si.si_errno = 0;
+			sigp->si.si_code = SI_USER;
+			sigp->si.si_pid = task_pid_nr(current);
+			sigp->si.si_uid = get_current_uuid();
+			if (cobalt_signal_send(thread, sigp, group) <= 0)
+				cobalt_signal_free(sigp);
+		}
+	resched:
+		xnsched_run();
+		break;
+	default:
+		ret = -EINVAL;
+	}
+
+	return ret;
+}
+
+COBALT_SYSCALL(kill, conforming, (pid_t pid, int sig))
+{
+	struct cobalt_thread *thread;
+	int ret;
+	spl_t s;
+
+	xnlock_get_irqsave(&nklock, s);
+
+	thread = cobalt_thread_find(pid);
+	if (thread == NULL)
+		ret = -ESRCH;
+	else
+		ret = __cobalt_kill(thread, sig, 1);
+
+	xnlock_put_irqrestore(&nklock, s);
+
+	return ret;
+}
+
+int __cobalt_sigqueue(pid_t pid, int sig, const union sigval *value)
+{
+	struct cobalt_sigpending *sigp;
+	struct cobalt_thread *thread;
+	int ret = 0;
+	spl_t s;
+
+	xnlock_get_irqsave(&nklock, s);
+
+	thread = cobalt_thread_find(pid);
+	if (thread == NULL) {
+		ret = -ESRCH;
+		goto out;
+	}
+
+	switch (sig) {
+	case 0:
+		/* Check for existence only. */
+		break;
+	case 1 ... _NSIG:
+		sigp = cobalt_signal_alloc();
+		if (sigp) {
+			sigp->si.si_signo = sig;
+			sigp->si.si_errno = 0;
+			sigp->si.si_code = SI_QUEUE;
+			sigp->si.si_pid = task_pid_nr(current);
+			sigp->si.si_uid = get_current_uuid();
+			sigp->si.si_value = *value;
+			if (cobalt_signal_send(thread, sigp, 1) <= 0)
+				cobalt_signal_free(sigp);
+			else
+				xnsched_run();
+		}
+		break;
+	default:
+		/* Cobalt pseudo-signals are never process-directed. */
+		ret = __cobalt_kill(thread, sig, 0);
+	}
+out:
+	xnlock_put_irqrestore(&nklock, s);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(__cobalt_sigqueue);
+
+COBALT_SYSCALL(sigqueue, conforming,
+	       (pid_t pid, int sig, const union sigval __user *u_value))
+{
+	union sigval val;
+	int ret;
+
+	ret = cobalt_copy_from_user(&val, u_value, sizeof(val));
+
+	return ret ?: __cobalt_sigqueue(pid, sig, &val);
+}
+
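+/*
+ * The sigpending pool is carved out of a single vmalloc'ed chunk;
+ * cobalt_signal_free() only links back blocks which belong to that
+ * range, so sigpending structs allocated elsewhere are simply
+ * ignored.
+ */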
+__init int cobalt_signal_init(void)
+{
+	struct cobalt_sigpending *sigp;
+
+	sigpending_mem = xnheap_vmalloc(__SIGPOOL_SIZE);
+	if (sigpending_mem == NULL)
+		return -ENOMEM;
+
+	for (sigp = sigpending_mem;
+	     (void *)sigp < sigpending_mem + __SIGPOOL_SIZE; sigp++)
+		list_add_tail(&sigp->next, &sigpending_pool);
+
+	return 0;
+}
+
+__init void cobalt_signal_cleanup(void)
+{
+	xnheap_vfree(sigpending_mem);
+}
+++ linux-patched/kernel/xenomai/posix/io.c	2022-03-21 12:58:29.050892267 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/kernel/xenomai/posix/extension.h	1970-01-01 01:00:00.000000000 +0100
+/*
+ * Copyright (C) 2005 Jan Kiszka <jan.kiszka@web.de>.
+ * Copyright (C) 2005 Joerg Langenberg <joerg.langenberg@gmx.net>.
+ * Copyright (C) 2008 Gilles Chanteperdrix <gilles.chanteperdrix@xenomai.org>
+ *
+ * Xenomai is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * Xenomai is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Xenomai; if not, write to the Free Software Foundation,
+ * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+#include <linux/err.h>
+#include <linux/fs.h>
+#include <cobalt/kernel/compat.h>
+#include <cobalt/kernel/ppd.h>
+#include <cobalt/kernel/time.h>
+#include <xenomai/rtdm/internal.h>
+#include "process.h"
+#include "internal.h"
+#include "clock.h"
+#include "io.h"
+
+COBALT_SYSCALL(open, lostage,
+	       (const char __user *u_path, int oflag))
+{
+	struct filename *filename;
+	int ufd;
+
+	filename = getname(u_path);
+	if (IS_ERR(filename))
+		return PTR_ERR(filename);
+
+	ufd = __rtdm_dev_open(filename->name, oflag);
+	putname(filename);
+
+	return ufd;
+}
+
+COBALT_SYSCALL(socket, lostage,
+	       (int protocol_family, int socket_type, int protocol))
+{
+	return __rtdm_dev_socket(protocol_family, socket_type, protocol);
+}
+
+COBALT_SYSCALL(close, lostage, (int fd))
+{
+	return rtdm_fd_close(fd, 0);
+}
+
+COBALT_SYSCALL(fcntl, current, (int fd, int cmd, long arg))
+{
+	return rtdm_fd_fcntl(fd, cmd, arg);
+}
+
+COBALT_SYSCALL(ioctl, handover,
+	       (int fd, unsigned int request, void __user *arg))
+{
+	return rtdm_fd_ioctl(fd, request, arg);
+}
+
+COBALT_SYSCALL(read, handover,
+	       (int fd, void __user *buf, size_t size))
+{
+	return rtdm_fd_read(fd, buf, size);
+}
+
+COBALT_SYSCALL(write, handover,
+	       (int fd, const void __user *buf, size_t size))
+{
+	return rtdm_fd_write(fd, buf, size);
+}
+
+COBALT_SYSCALL(recvmsg, handover,
+	       (int fd, struct user_msghdr __user *umsg, int flags))
+{
+	struct user_msghdr m;
+	ssize_t ret;
+
+	ret = cobalt_copy_from_user(&m, umsg, sizeof(m));
+	if (ret)
+		return ret;
+
+	ret = rtdm_fd_recvmsg(fd, &m, flags);
+	if (ret < 0)
+		return ret;
+
+	return cobalt_copy_to_user(umsg, &m, sizeof(*umsg)) ?: ret;
+}
+
+static int get_timespec(struct timespec64 *ts,
+			const void __user *u_ts)
+{
+	return cobalt_get_u_timespec(ts, u_ts);
+}
+
+static int get_mmsg(struct mmsghdr *mmsg, void __user *u_mmsg)
+{
+	return cobalt_copy_from_user(mmsg, u_mmsg, sizeof(*mmsg));
+}
+
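+/*
+ * The output handlers advance the caller's vector pointer after each
+ * element, so that the RTDM core can walk the user-provided mmsghdr
+ * array.
+ */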
+static int put_mmsg(void __user **u_mmsg_p, const struct mmsghdr *mmsg)
+{
+	struct mmsghdr __user **p = (struct mmsghdr **)u_mmsg_p,
+		*q __user = (*p)++;
+
+	return cobalt_copy_to_user(q, mmsg, sizeof(*q));
+}
+
+COBALT_SYSCALL(recvmmsg, primary,
+	       (int fd, struct mmsghdr __user *u_msgvec, unsigned int vlen,
+		unsigned int flags, struct __user_old_timespec __user *u_timeout))
+{
+	return __rtdm_fd_recvmmsg(fd, u_msgvec, vlen, flags, u_timeout,
+				  get_mmsg, put_mmsg, get_timespec);
+}
+
+COBALT_SYSCALL(recvmmsg64, primary,
+	       (int fd, struct mmsghdr __user *u_msgvec, unsigned int vlen,
+		unsigned int flags, struct __kernel_timespec __user *u_timeout))
+{
+	return __rtdm_fd_recvmmsg64(fd, u_msgvec, vlen, flags, u_timeout,
+				    get_mmsg, put_mmsg);
+}
+
+COBALT_SYSCALL(sendmsg, handover,
+	       (int fd, struct user_msghdr __user *umsg, int flags))
+{
+	struct user_msghdr m;
+	int ret;
+
+	ret = cobalt_copy_from_user(&m, umsg, sizeof(m));
+
+	return ret ?: rtdm_fd_sendmsg(fd, &m, flags);
+}
+
+static int put_mmsglen(void __user **u_mmsg_p, const struct mmsghdr *mmsg)
+{
+	struct mmsghdr __user **p = (struct mmsghdr **)u_mmsg_p,
+		*q __user = (*p)++;
+
+	return __xn_put_user(mmsg->msg_len, &q->msg_len);
+}
+
+COBALT_SYSCALL(sendmmsg, primary,
+	       (int fd, struct mmsghdr __user *u_msgvec,
+		unsigned int vlen, unsigned int flags))
+{
+	return __rtdm_fd_sendmmsg(fd, u_msgvec, vlen, flags,
+				  get_mmsg, put_mmsglen);
+}
+
+COBALT_SYSCALL(mmap, lostage,
+	       (int fd, struct _rtdm_mmap_request __user *u_rma,
+	        void __user **u_addrp))
+{
+	struct _rtdm_mmap_request rma;
+	void *u_addr = NULL;
+	int ret;
+
+	ret = cobalt_copy_from_user(&rma, u_rma, sizeof(rma));
+	if (ret)
+		return ret;
+
+	ret = rtdm_fd_mmap(fd, &rma, &u_addr);
+	if (ret)
+		return ret;
+
+	return cobalt_copy_to_user(u_addrp, &u_addr, sizeof(u_addr));
+}
+
+static int __cobalt_first_fd_valid_p(fd_set *fds[XNSELECT_MAX_TYPES], int nfds)
+{
+	int i, fd;
+
+	for (i = 0; i < XNSELECT_MAX_TYPES; i++)
+		if (fds[i]
+		    && (fd = find_first_bit(fds[i]->fds_bits, nfds)) < nfds)
+			return rtdm_fd_valid_p(fd);
+
+	/* An all-empty set is valid; some odd applications use it as a
+	   "sleep" mechanism. */
+	return 1;
+}
+
+static int __cobalt_select_bind_all(struct xnselector *selector,
+				    fd_set *fds[XNSELECT_MAX_TYPES], int nfds)
+{
+	bool first_fd = true;
+	unsigned fd, type;
+	int err;
+
+	for (type = 0; type < XNSELECT_MAX_TYPES; type++) {
+		fd_set *set = fds[type];
+		if (set)
+			for (fd = find_first_bit(set->fds_bits, nfds);
+			     fd < nfds;
+			     fd = find_next_bit(set->fds_bits, nfds, fd + 1)) {
+				err = rtdm_fd_select(fd, selector, type);
+				if (err) {
+					/*
+					 * Do not needlessly signal "retry
+					 * under Linux" for mixed fd sets.
+					 */
+					if (err == -EADV && !first_fd)
+						return -EBADF;
+					return err;
+				}
+				first_fd = false;
+			}
+	}
+
+	return 0;
+}
+
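+/*
+ * Cobalt's select() backend: the fd_sets are copied in, a per-thread
+ * xnselector is lazily allocated and bound to the RTDM descriptors
+ * found in those sets, then xnselect() is (re)run, rebinding whenever
+ * -ECHRNG reports descriptors which are not attached to the selector
+ * yet. A pending Linux signal turns the wait into -ERESTARTSYS, with
+ * the absolute timeout saved into the restart block.
+ */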
+int __cobalt_select(int nfds, void __user *u_rfds, void __user *u_wfds,
+		    void __user *u_xfds, void __user *u_tv, bool compat)
+{
+	void __user *ufd_sets[XNSELECT_MAX_TYPES] = {
+		[XNSELECT_READ] = u_rfds,
+		[XNSELECT_WRITE] = u_wfds,
+		[XNSELECT_EXCEPT] = u_xfds
+	};
+	fd_set *in_fds[XNSELECT_MAX_TYPES] = {NULL, NULL, NULL};
+	fd_set *out_fds[XNSELECT_MAX_TYPES] = {NULL, NULL, NULL};
+	fd_set in_fds_storage[XNSELECT_MAX_TYPES],
+		out_fds_storage[XNSELECT_MAX_TYPES];
+	xnticks_t timeout = XN_INFINITE;
+	struct restart_block *restart;
+	xntmode_t mode = XN_RELATIVE;
+	struct xnselector *selector;
+	struct xnthread *curr;
+	struct __kernel_old_timeval tv;
+	size_t fds_size;
+	int i, err;
+
+	curr = xnthread_current();
+
+	if (u_tv) {
+		if (xnthread_test_localinfo(curr, XNSYSRST)) {
+			xnthread_clear_localinfo(curr, XNSYSRST);
+
+			restart = cobalt_get_restart_block(current);
+			timeout = restart->nanosleep.expires;
+
+			if (restart->fn != cobalt_restart_syscall_placeholder) {
+				err = -EINTR;
+				goto out;
+			}
+		} else {
+#ifdef CONFIG_XENO_ARCH_SYS3264
+			if (compat) {
+				if (sys32_get_timeval(&tv, u_tv))
+					return -EFAULT;
+			} else
+#endif
+			{
+				if (!access_wok(u_tv, sizeof(tv))
+				    || cobalt_copy_from_user(&tv, u_tv,
+							     sizeof(tv)))
+					return -EFAULT;
+			}
+
+			if (tv.tv_usec >= 1000000)
+				return -EINVAL;
+
+			timeout = clock_get_ticks(CLOCK_MONOTONIC) + tv2ns(&tv);
+		}
+
+		mode = XN_ABSOLUTE;
+	}
+
+	fds_size = __FDELT__(nfds + __NFDBITS__ - 1) * sizeof(long);
+
+	for (i = 0; i < XNSELECT_MAX_TYPES; i++)
+		if (ufd_sets[i]) {
+			in_fds[i] = &in_fds_storage[i];
+			out_fds[i] = &out_fds_storage[i];
+#ifdef CONFIG_XENO_ARCH_SYS3264
+			if (compat) {
+				if (sys32_get_fdset(in_fds[i], ufd_sets[i],
+						    fds_size))
+					return -EFAULT;
+			} else
+#endif
+			{
+				if (!access_wok((void __user *) ufd_sets[i],
+						sizeof(fd_set))
+				    || cobalt_copy_from_user(in_fds[i],
+							     (void __user *)ufd_sets[i],
+							     fds_size))
+					return -EFAULT;
+			}
+		}
+
+	selector = curr->selector;
+	if (!selector) {
+		/* This function may be called with fd_sets referring to
+		   regular Linux descriptors only; we want to avoid the
+		   xnselector allocation in this case, so we run a simple
+		   check: is the first file descriptor we find in the fd_sets
+		   an RTDM or message queue descriptor? */
+		if (!__cobalt_first_fd_valid_p(in_fds, nfds))
+			return -EADV;
+
+		selector = xnmalloc(sizeof(*curr->selector));
+		if (selector == NULL)
+			return -ENOMEM;
+		xnselector_init(selector);
+		curr->selector = selector;
+
+		/* Bind the file descriptors directly, so that we do not need
+		   to go through xnselect() returning -ECHRNG. */
+		err = __cobalt_select_bind_all(selector, in_fds, nfds);
+		if (err)
+			return err;
+	}
+
+	do {
+		err = xnselect(selector, out_fds, in_fds, nfds, timeout, mode);
+		if (err == -ECHRNG) {
+			int bind_err = __cobalt_select_bind_all(selector,
+								out_fds, nfds);
+			if (bind_err)
+				return bind_err;
+		}
+	} while (err == -ECHRNG);
+
+	if (err == -EINTR && signal_pending(current)) {
+		xnthread_set_localinfo(curr, XNSYSRST);
+
+		restart = cobalt_get_restart_block(current);
+		restart->fn = cobalt_restart_syscall_placeholder;
+		restart->nanosleep.expires = timeout;
+
+		return -ERESTARTSYS;
+	}
+
+out:
+	if (u_tv && (err > 0 || err == -EINTR)) {
+		xnsticks_t diff = timeout - clock_get_ticks(CLOCK_MONOTONIC);
+		if (diff > 0)
+			ticks2tv(&tv, diff);
+		else
+			tv.tv_sec = tv.tv_usec = 0;
+
+#ifdef CONFIG_XENO_ARCH_SYS3264
+		if (compat) {
+			if (sys32_put_timeval(u_tv, &tv))
+				return -EFAULT;
+		} else
+#endif
+		{
+			if (cobalt_copy_to_user(u_tv, &tv, sizeof(tv)))
+				return -EFAULT;
+		}
+	}
+
+	if (err >= 0)
+		for (i = 0; i < XNSELECT_MAX_TYPES; i++) {
+			if (!ufd_sets[i])
+				continue;
+#ifdef CONFIG_XENO_ARCH_SYS3264
+			if (compat) {
+				if (sys32_put_fdset(ufd_sets[i], out_fds[i],
+						    sizeof(fd_set)))
+					return -EFAULT;
+			} else
+#endif
+			{
+				if (cobalt_copy_to_user((void __user *)ufd_sets[i],
+							out_fds[i], sizeof(fd_set)))
+					return -EFAULT;
+			}
+		}
+	return err;
+}
+
+/* int select(int, fd_set *, fd_set *, fd_set *, struct __kernel_old_timeval *) */
+COBALT_SYSCALL(select, primary,
+	       (int nfds,
+		fd_set __user *u_rfds,
+		fd_set __user *u_wfds,
+		fd_set __user *u_xfds,
+		struct __kernel_old_timeval __user *u_tv))
+{
+	return __cobalt_select(nfds, u_rfds, u_wfds, u_xfds, u_tv, false);
+}
+++ linux-patched/kernel/xenomai/posix/extension.h	2022-03-21 12:58:29.047892296 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/kernel/xenomai/posix/sem.c	1970-01-01 01:00:00.000000000 +0100
+/*
+ * Copyright (C) 2013 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+#ifndef _COBALT_POSIX_EXTENSION_H
+#define _COBALT_POSIX_EXTENSION_H
+
+#include <linux/time.h>
+#include <linux/list.h>
+
+#ifdef CONFIG_XENO_OPT_COBALT_EXTENSION
+
+#include <cobalt/kernel/thread.h>
+
+struct cobalt_timer;
+struct cobalt_sigpending;
+struct cobalt_extref;
+struct siginfo;
+struct xnsched_class;
+union xnsched_policy_param;
+
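+/*
+ * A Cobalt extension wraps the core thread personality and a set of
+ * optional per-thread hooks; any hook left NULL makes the
+ * corresponding cobalt_call_extension() invocation a no-op.
+ */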
+struct cobalt_extension {
+	struct xnthread_personality core;
+	struct {
+		struct cobalt_thread *
+		(*timer_init)(struct cobalt_extref *reftimer, /* nklocked, IRQs off. */
+			      const struct sigevent *__restrict__ evp);
+		int (*timer_settime)(struct cobalt_extref *reftimer, /* nklocked, IRQs off. */
+				     const struct itimerspec64 *__restrict__ value,
+				     int flags);
+		int (*timer_gettime)(struct cobalt_extref *reftimer, /* nklocked, IRQs off. */
+				     struct itimerspec64 *__restrict__ value);
+		int (*timer_delete)(struct cobalt_extref *reftimer); /* nklocked, IRQs off. */
+		int (*timer_cleanup)(struct cobalt_extref *reftimer); /* nklocked, IRQs off. */
+		int (*signal_deliver)(struct cobalt_extref *refthread,
+				      struct siginfo *si,
+				      struct cobalt_sigpending *sigp);
+		int (*signal_queue)(struct cobalt_extref *refthread,
+				    struct cobalt_sigpending *sigp);
+		int (*signal_copyinfo)(struct cobalt_extref *refthread,
+				       void __user *u_si,
+				       const struct siginfo *si,
+				       int overrun);
+		int (*signal_copyinfo_compat)(struct cobalt_extref *refthread,
+					      void __user *u_si,
+					      const struct siginfo *si,
+					      int overrun);
+		int (*sched_yield)(struct cobalt_extref *curref);
+		int (*thread_setsched)(struct cobalt_extref *refthread, /* nklocked, IRQs off. */
+				       struct xnsched_class *sched_class,
+				       union xnsched_policy_param *param);
+	} ops;
+};
+
+struct cobalt_extref {
+	struct cobalt_extension *extension;
+	struct list_head next;
+	void *private;
+};
+
+static inline void cobalt_set_extref(struct cobalt_extref *ref,
+				     struct cobalt_extension *ext,
+				     void *priv)
+{
+	ref->extension = ext;
+	ref->private = priv;
+}
+
+/**
+ * All macros return non-zero if some thread-level extension code was
+ * called, leaving the output value in __ret. Otherwise, the __ret
+ * value is undefined.
+ */
+#define cobalt_initcall_extension(__extfn, __extref, __owner, __ret, __args...) \
+	({									\
+		int __val = 0;							\
+		if ((__owner) && (__owner)->extref.extension) {			\
+			(__extref)->extension = (__owner)->extref.extension;	\
+			if ((__extref)->extension->ops.__extfn) {		\
+				(__ret) = (__extref)->extension->ops.		\
+					__extfn(__extref, ##__args );		\
+				__val = 1;					\
+			}							\
+		} else								\
+			(__extref)->extension = NULL;				\
+		__val;								\
+	})
+
+#define cobalt_call_extension(__extfn, __extref, __ret, __args...)	\
+	({								\
+		int __val = 0;						\
+		if ((__extref)->extension &&				\
+		    (__extref)->extension->ops.__extfn) {		\
+			(__ret) = (__extref)->extension->ops.		\
+				__extfn(__extref, ##__args );		\
+			__val = 1;					\
+		}							\
+		__val;							\
+	})
+
+#else /* !CONFIG_XENO_OPT_COBALT_EXTENSION */
+
+struct cobalt_extension;
+
+struct cobalt_extref {
+};
+
+static inline void cobalt_set_extref(struct cobalt_extref *ref,
+				     struct cobalt_extension *ext,
+				     void *priv)
+{
+}
+
+#define cobalt_initcall_extension(__extfn, __extref, __owner, __ret, __args...)	\
+	({ (void)(__owner); (void)(__ret); 0; })
+
+#define cobalt_call_extension(__extfn, __extref, __ret, __args...)	\
+	({ (void)(__ret); 0; })
+
+#endif /* !CONFIG_XENO_OPT_COBALT_EXTENSION */
+
+#endif /* !_COBALT_POSIX_EXTENSION_H */
+++ linux-patched/kernel/xenomai/posix/sem.c	2022-03-21 12:58:29.043892335 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/kernel/xenomai/posix/timer.h	1970-01-01 01:00:00.000000000 +0100
+/*
+ * Written by Gilles Chanteperdrix <gilles.chanteperdrix@xenomai.org>.
+ * Copyright (C) 2014,2015 Philippe Gerum <rpm@xenomai.org>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+#include <stddef.h>
+#include <linux/err.h>
+#include <cobalt/kernel/time.h>
+#include "internal.h"
+#include "thread.h"
+#include "clock.h"
+#include "sem.h"
+#include <trace/events/cobalt-posix.h>
+
+#ifdef CONFIG_XENO_OPT_VFILE
+
+static int sem_vfile_show(struct xnvfile_regular_iterator *it, void *data)
+{
+	return 0;
+}
+
+static struct xnvfile_regular_ops sem_vfile_ops = {
+	.show = sem_vfile_show,
+};
+
+static struct xnpnode_regular __sem_pnode = {
+	.node = {
+		.dirname = "sem",
+		.root = &posix_ptree,
+		.ops = &xnregistry_vfreg_ops,
+	},
+	.vfile = {
+		.ops = &sem_vfile_ops,
+	},
+};
+
+#else /* !CONFIG_XENO_OPT_VFILE */
+
+static struct xnpnode_link __sem_pnode = {
+	.node = {
+		.dirname = "sem",
+	}
+};
+
+#endif /* !CONFIG_XENO_OPT_VFILE */
+
+static inline struct cobalt_resources *sem_kqueue(struct cobalt_sem *sem)
+{
+	int pshared = !!(sem->flags & SEM_PSHARED);
+	return cobalt_current_resources(pshared);
+}
+
+static inline int sem_check(struct cobalt_sem *sem)
+{
+	if (sem == NULL || sem->magic != COBALT_SEM_MAGIC)
+		return -EINVAL;
+
+	if (sem->resnode.scope && sem->resnode.scope != sem_kqueue(sem))
+		return -EPERM;
+
+	return 0;
+}
+
+int __cobalt_sem_destroy(xnhandle_t handle)
+{
+	struct cobalt_sem *sem;
+	int ret = 0;
+	spl_t s;
+
+	xnlock_get_irqsave(&nklock, s);
+	sem = xnregistry_lookup(handle, NULL);
+	if (!cobalt_obj_active(sem, COBALT_SEM_MAGIC, typeof(*sem))) {
+		ret = -EINVAL;
+		goto fail;
+	}
+
+	if (--sem->refs) {
+		ret = -EBUSY;
+		goto fail;
+	}
+
+	cobalt_mark_deleted(sem);
+	if (!sem->pathname)
+		cobalt_del_resource(&sem->resnode);
+	if (xnsynch_destroy(&sem->synchbase) == XNSYNCH_RESCHED) {
+		xnsched_run();
+		ret = 1;
+	}
+
+	xnlock_put_irqrestore(&nklock, s);
+
+	xnregistry_remove(sem->resnode.handle);
+	if (sem->pathname)
+		putname(sem->pathname);
+
+	cobalt_umm_free(&cobalt_ppd_get(!!(sem->flags & SEM_PSHARED))->umm,
+			sem->state);
+
+	xnfree(sem);
+
+	return ret;
+fail:
+	xnlock_put_irqrestore(&nklock, s);
+
+	return ret;
+}
+
+struct cobalt_sem *
+__cobalt_sem_init(const char *name, struct cobalt_sem_shadow *sm,
+		  int flags, unsigned int value)
+{
+	struct cobalt_sem_state *state;
+	struct cobalt_sem *sem, *osem;
+	struct cobalt_ppd *sys_ppd;
+	int ret, sflags, pshared;
+	struct list_head *semq;
+	spl_t s;
+
+	if ((flags & SEM_PULSE) != 0 && value > 0) {
+		ret = -EINVAL;
+		goto out;
+	}
+
+	sem = xnmalloc(sizeof(*sem));
+	if (sem == NULL) {
+		ret = -ENOMEM;
+		goto out;
+	}
+
+	pshared = !!(flags & SEM_PSHARED);
+	sys_ppd = cobalt_ppd_get(pshared);
+	state = cobalt_umm_alloc(&sys_ppd->umm, sizeof(*state));
+	if (state == NULL) {
+		ret = -EAGAIN;
+		goto err_free_sem;
+	}
+
+	xnlock_get_irqsave(&nklock, s);
+
+	semq = &cobalt_current_resources(pshared)->semq;
+	if ((sm->magic == COBALT_SEM_MAGIC && !list_empty(semq)) ||
+	    sm->magic == COBALT_NAMED_SEM_MAGIC) {
+		osem = xnregistry_lookup(sm->handle, NULL);
+		if (cobalt_obj_active(osem, COBALT_SEM_MAGIC, typeof(*osem))) {
+			ret = -EBUSY;
+			goto err_lock_put;
+		}
+	}
+
+	if (value > (unsigned)SEM_VALUE_MAX) {
+		ret = -EINVAL;
+		goto err_lock_put;
+	}
+
+	ret = xnregistry_enter(name ?: "", sem, &sem->resnode.handle,
+			       name ? &__sem_pnode.node : NULL);
+	if (ret < 0)
+		goto err_lock_put;
+
+	sem->magic = COBALT_SEM_MAGIC;
+	if (!name)
+		cobalt_add_resource(&sem->resnode, sem, pshared);
+	else
+		sem->resnode.scope = NULL;
+	sflags = flags & SEM_FIFO ? 0 : XNSYNCH_PRIO;
+	xnsynch_init(&sem->synchbase, sflags, NULL);
+
+	sem->state = state;
+	atomic_set(&state->value, value);
+	state->flags = flags;
+	sem->flags = flags;
+	sem->refs = name ? 2 : 1;
+	sem->pathname = NULL;
+
+	xnlock_put_irqrestore(&nklock, s);
+
+	__cobalt_sem_shadow_init(sem,
+			name ? COBALT_NAMED_SEM_MAGIC : COBALT_SEM_MAGIC, sm);
+
+	trace_cobalt_psem_init(name ?: "anon",
+			       sem->resnode.handle, flags, value);
+
+	return sem;
+
+err_lock_put:
+	xnlock_put_irqrestore(&nklock, s);
+	cobalt_umm_free(&sys_ppd->umm, state);
+err_free_sem:
+	xnfree(sem);
+out:
+	trace_cobalt_psem_init_failed(name ?: "anon", flags, value, ret);
+
+	return ERR_PTR(ret);
+}
+
+void __cobalt_sem_shadow_init(struct cobalt_sem *sem, __u32 magic,
+			      struct cobalt_sem_shadow *sm)
+{
+	__u32 flags = sem->state->flags;
+	struct cobalt_ppd *sys_ppd;
+
+	sys_ppd = cobalt_ppd_get(!!(flags & SEM_PSHARED));
+
+	sm->magic = magic;
+	sm->handle = sem->resnode.handle;
+	sm->state_offset = cobalt_umm_offset(&sys_ppd->umm, sem->state);
+	if (sem->state->flags & SEM_PSHARED)
+		sm->state_offset = -sm->state_offset;
+}
+
+static int sem_destroy(struct cobalt_sem_shadow *sm)
+{
+	struct cobalt_sem *sem;
+	int warn, ret;
+	spl_t s;
+
+	xnlock_get_irqsave(&nklock, s);
+
+	if (sm->magic != COBALT_SEM_MAGIC) {
+		ret = -EINVAL;
+		goto fail;
+	}
+
+	sem = xnregistry_lookup(sm->handle, NULL);
+	ret = sem_check(sem);
+	if (ret)
+		goto fail;
+
+	if ((sem->flags & SEM_NOBUSYDEL) != 0 &&
+	    xnsynch_pended_p(&sem->synchbase)) {
+		ret = -EBUSY;
+		goto fail;
+	}
+
+	warn = sem->flags & SEM_WARNDEL;
+	cobalt_mark_deleted(sm);
+
+	xnlock_put_irqrestore(&nklock, s);
+
+	ret = __cobalt_sem_destroy(sem->resnode.handle);
+
+	return warn ? ret : 0;
+fail:
+	xnlock_put_irqrestore(&nklock, s);
+
+	return ret;
+}
+
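+/*
+ * The semaphore count may go negative: its absolute value then gives
+ * the number of waiters, which sem_post(), sem_getvalue() and
+ * sem_inquire() rely on.
+ */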
+static inline int do_trywait(struct cobalt_sem *sem)
+{
+	int ret;
+
+	ret = sem_check(sem);
+	if (ret)
+		return ret;
+
+	if (atomic_sub_return(1, &sem->state->value) < 0)
+		return -EAGAIN;
+
+	return 0;
+}
+
+static int sem_wait(xnhandle_t handle)
+{
+	struct cobalt_sem *sem;
+	int ret, info;
+	spl_t s;
+
+	xnlock_get_irqsave(&nklock, s);
+
+	sem = xnregistry_lookup(handle, NULL);
+	ret = do_trywait(sem);
+	if (ret != -EAGAIN)
+		goto out;
+
+	ret = 0;
+	info = xnsynch_sleep_on(&sem->synchbase, XN_INFINITE, XN_RELATIVE);
+	if (info & XNRMID) {
+		ret = -EINVAL;
+	} else if (info & XNBREAK) {
+		atomic_inc(&sem->state->value); /* undo do_trywait() */
+		ret = -EINTR;
+	}
+out:
+	xnlock_put_irqrestore(&nklock, s);
+
+	return ret;
+}
+
+int __cobalt_sem_timedwait(struct cobalt_sem_shadow __user *u_sem,
+			   const struct timespec64 *ts)
+{
+	int ret, info;
+	bool validate_ts = true;
+	struct cobalt_sem *sem;
+	xnhandle_t handle;
+	xntmode_t tmode;
+	spl_t s;
+
+	handle = cobalt_get_handle_from_user(&u_sem->handle);
+	trace_cobalt_psem_timedwait(handle);
+
+	xnlock_get_irqsave(&nklock, s);
+
+	for (;;) {
+		sem = xnregistry_lookup(handle, NULL);
+		ret = do_trywait(sem);
+		if (ret != -EAGAIN)
+			break;
+
+		/*
+		 * POSIX states that the validity of the timeout spec
+		 * need not be checked if the semaphore can be locked
+		 * immediately. We implement this behavior, although it
+		 * is actually more complex, to keep some applications
+		 * ported to Linux happy.
+		 */
+		if (validate_ts) {
+			atomic_inc(&sem->state->value);
+			if (!ts) {
+				ret = -EFAULT;
+				break;
+			}
+			if (!timespec64_valid(ts)) {
+				ret = -EINVAL;
+				break;
+			}
+			validate_ts = false;
+			continue;
+		}
+
+		ret = 0;
+		tmode = sem->flags & SEM_RAWCLOCK ? XN_ABSOLUTE : XN_REALTIME;
+		info = xnsynch_sleep_on(&sem->synchbase, ts2ns(ts) + 1, tmode);
+		if (info & XNRMID)
+			ret = -EINVAL;
+		else if (info & (XNBREAK|XNTIMEO)) {
+			ret = (info & XNBREAK) ? -EINTR : -ETIMEDOUT;
+			atomic_inc(&sem->state->value);
+		}
+		break;
+	}
+
+	xnlock_put_irqrestore(&nklock, s);
+
+	return ret;
+}
+
+int __cobalt_sem_timedwait64(struct cobalt_sem_shadow __user *u_sem,
+			     const struct __kernel_timespec __user *u_ts)
+{
+	int ret = 1;
+	struct timespec64 ts64;
+
+	if (u_ts)
+		ret = cobalt_get_timespec64(&ts64, u_ts);
+
+	return __cobalt_sem_timedwait(u_sem, ret ? NULL : &ts64);
+}
+
+static int sem_post(xnhandle_t handle)
+{
+	struct cobalt_sem *sem;
+	int ret;
+	spl_t s;
+
+	xnlock_get_irqsave(&nklock, s);
+
+	sem = xnregistry_lookup(handle, NULL);
+	ret = sem_check(sem);
+	if (ret)
+		goto out;
+
+	if (atomic_read(&sem->state->value) == SEM_VALUE_MAX) {
+		ret = -EINVAL;
+		goto out;
+	}
+
+	if (atomic_inc_return(&sem->state->value) <= 0) {
+		if (xnsynch_wakeup_one_sleeper(&sem->synchbase))
+			xnsched_run();
+	} else if (sem->flags & SEM_PULSE)
+		atomic_set(&sem->state->value, 0);
+out:
+	xnlock_put_irqrestore(&nklock, s);
+
+	return ret;
+}
+
+static int sem_getvalue(xnhandle_t handle, int *value)
+{
+	struct cobalt_sem *sem;
+	int ret;
+	spl_t s;
+
+	xnlock_get_irqsave(&nklock, s);
+
+	sem = xnregistry_lookup(handle, NULL);
+	ret = sem_check(sem);
+	if (ret) {
+		xnlock_put_irqrestore(&nklock, s);
+		return ret;
+	}
+
+	*value = atomic_read(&sem->state->value);
+	if ((sem->flags & SEM_REPORT) == 0 && *value < 0)
+		*value = 0;
+
+	xnlock_put_irqrestore(&nklock, s);
+
+	return 0;
+}
+
+COBALT_SYSCALL(sem_init, current,
+	       (struct cobalt_sem_shadow __user *u_sem,
+		int flags, unsigned int value))
+{
+	struct cobalt_sem_shadow sm;
+	struct cobalt_sem *sem;
+
+	if (cobalt_copy_from_user(&sm, u_sem, sizeof(sm)))
+		return -EFAULT;
+
+	if (flags & ~(SEM_FIFO|SEM_PULSE|SEM_PSHARED|SEM_REPORT|\
+		      SEM_WARNDEL|SEM_RAWCLOCK|SEM_NOBUSYDEL))
+		return -EINVAL;
+
+	sem = __cobalt_sem_init(NULL, &sm, flags, value);
+	if (IS_ERR(sem))
+		return PTR_ERR(sem);
+
+	return cobalt_copy_to_user(u_sem, &sm, sizeof(*u_sem));
+}
+
+COBALT_SYSCALL(sem_post, current,
+	       (struct cobalt_sem_shadow __user *u_sem))
+{
+	xnhandle_t handle;
+
+	handle = cobalt_get_handle_from_user(&u_sem->handle);
+	trace_cobalt_psem_post(handle);
+
+	return sem_post(handle);
+}
+
+COBALT_SYSCALL(sem_wait, primary,
+	       (struct cobalt_sem_shadow __user *u_sem))
+{
+	xnhandle_t handle;
+
+	handle = cobalt_get_handle_from_user(&u_sem->handle);
+	trace_cobalt_psem_wait(handle);
+
+	return sem_wait(handle);
+}
+
+COBALT_SYSCALL(sem_timedwait, primary,
+	       (struct cobalt_sem_shadow __user *u_sem,
+		const struct __user_old_timespec __user *u_ts))
+{
+	int ret = 1;
+	struct timespec64 ts64;
+
+	if (u_ts)
+		ret = cobalt_get_u_timespec(&ts64, u_ts);
+
+	return __cobalt_sem_timedwait(u_sem, ret ? NULL : &ts64);
+}
+
+COBALT_SYSCALL(sem_timedwait64, primary,
+	       (struct cobalt_sem_shadow __user *u_sem,
+		const struct __kernel_timespec __user *u_ts))
+{
+	return __cobalt_sem_timedwait64(u_sem, u_ts);
+}
+
+COBALT_SYSCALL(sem_trywait, primary,
+	       (struct cobalt_sem_shadow __user *u_sem))
+{
+	struct cobalt_sem *sem;
+	xnhandle_t handle;
+	int ret;
+	spl_t s;
+
+	handle = cobalt_get_handle_from_user(&u_sem->handle);
+	trace_cobalt_psem_trywait(handle);
+
+	xnlock_get_irqsave(&nklock, s);
+	sem = xnregistry_lookup(handle, NULL);
+	ret = do_trywait(sem);
+	xnlock_put_irqrestore(&nklock, s);
+
+	return ret;
+}
+
+COBALT_SYSCALL(sem_getvalue, current,
+	       (struct cobalt_sem_shadow __user *u_sem,
+		int __user *u_sval))
+{
+	int ret, sval = -1;
+	xnhandle_t handle;
+
+	handle = cobalt_get_handle_from_user(&u_sem->handle);
+
+	ret = sem_getvalue(handle, &sval);
+	trace_cobalt_psem_getvalue(handle, sval);
+	if (ret)
+		return ret;
+
+	return cobalt_copy_to_user(u_sval, &sval, sizeof(sval));
+}
+
+COBALT_SYSCALL(sem_destroy, current,
+	       (struct cobalt_sem_shadow __user *u_sem))
+{
+	struct cobalt_sem_shadow sm;
+	int err;
+
+	if (cobalt_copy_from_user(&sm, u_sem, sizeof(sm)))
+		return -EFAULT;
+
+	trace_cobalt_psem_destroy(sm.handle);
+
+	err = sem_destroy(&sm);
+	if (err < 0)
+		return err;
+
+	return cobalt_copy_to_user(u_sem, &sm, sizeof(*u_sem)) ?: err;
+}
+
+COBALT_SYSCALL(sem_broadcast_np, current,
+	       (struct cobalt_sem_shadow __user *u_sem))
+{
+	struct cobalt_sem *sem;
+	xnhandle_t handle;
+	spl_t s;
+	int ret;
+
+	handle = cobalt_get_handle_from_user(&u_sem->handle);
+	trace_cobalt_psem_broadcast(handle);
+
+	xnlock_get_irqsave(&nklock, s);
+
+	sem = xnregistry_lookup(handle, NULL);
+	ret = sem_check(sem);
+	if (ret == 0 && atomic_read(&sem->state->value) < 0) {
+		atomic_set(&sem->state->value, 0);
+		xnsynch_flush(&sem->synchbase, 0);
+		xnsched_run();
+	}
+
+	xnlock_put_irqrestore(&nklock, s);
+
+	return ret;
+}
+
+COBALT_SYSCALL(sem_inquire, current,
+	       (struct cobalt_sem_shadow __user *u_sem,
+		struct cobalt_sem_info __user *u_info,
+		pid_t __user *u_waitlist,
+		size_t waitsz))
+{
+	int val = 0, nrwait = 0, nrpids, ret = 0;
+	unsigned long pstamp, nstamp = 0;
+	struct cobalt_sem_info info;
+	pid_t *t = NULL, fbuf[16];
+	struct xnthread *thread;
+	struct cobalt_sem *sem;
+	xnhandle_t handle;
+	spl_t s;
+
+	handle = cobalt_get_handle_from_user(&u_sem->handle);
+	trace_cobalt_psem_inquire(handle);
+
+	nrpids = waitsz / sizeof(pid_t);
+
+	xnlock_get_irqsave(&nklock, s);
+
+	for (;;) {
+		pstamp = nstamp;
+		sem = xnregistry_lookup(handle, &nstamp);
+		if (sem == NULL || sem->magic != COBALT_SEM_MAGIC) {
+			xnlock_put_irqrestore(&nklock, s);
+			return -EINVAL;
+		}
+		/*
+		 * Allocate memory to return the wait list without
+		 * holding any lock, then revalidate the handle.
+		 */
+		if (t == NULL) {
+			val = atomic_read(&sem->state->value);
+			if (val >= 0 || u_waitlist == NULL)
+				break;
+			xnlock_put_irqrestore(&nklock, s);
+			if (nrpids > -val)
+				nrpids = -val;
+			if (-val <= ARRAY_SIZE(fbuf))
+				t = fbuf; /* Use fast buffer. */
+			else {
+				t = xnmalloc(-val * sizeof(pid_t));
+				if (t == NULL)
+					return -ENOMEM;
+			}
+			xnlock_get_irqsave(&nklock, s);
+		} else if (pstamp == nstamp)
+			break;
+		else if (val != atomic_read(&sem->state->value)) {
+			xnlock_put_irqrestore(&nklock, s);
+			if (t != fbuf)
+				xnfree(t);
+			t = NULL;
+			xnlock_get_irqsave(&nklock, s);
+		}
+	}
+
+	info.flags = sem->flags;
+	info.value = (sem->flags & SEM_REPORT) || val >= 0 ? val : 0;
+	info.nrwait = val < 0 ? -val : 0;
+
+	if (xnsynch_pended_p(&sem->synchbase) && u_waitlist != NULL) {
+		xnsynch_for_each_sleeper(thread, &sem->synchbase) {
+			if (nrwait >= nrpids)
+				break;
+			t[nrwait++] = xnthread_host_pid(thread);
+		}
+	}
+
+	xnlock_put_irqrestore(&nklock, s);
+
+	ret = cobalt_copy_to_user(u_info, &info, sizeof(info));
+	if (ret == 0 && nrwait > 0)
+		ret = cobalt_copy_to_user(u_waitlist, t, nrwait * sizeof(pid_t));
+
+	if (t && t != fbuf)
+		xnfree(t);
+
+	return ret ?: nrwait;
+}
+
+void cobalt_sem_reclaim(struct cobalt_resnode *node, spl_t s)
+{
+	struct cobalt_sem *sem;
+	xnhandle_t handle;
+	int named, ret;
+
+	sem = container_of(node, struct cobalt_sem, resnode);
+	named = (sem->flags & SEM_NAMED) != 0;
+	handle = node->handle;
+	xnlock_put_irqrestore(&nklock, s);
+	ret = __cobalt_sem_destroy(handle);
+	if (named && ret == -EBUSY)
+		xnregistry_unlink(xnregistry_key(handle));
+}
+++ linux-patched/kernel/xenomai/posix/timer.h	2022-03-21 12:58:29.040892365 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/kernel/xenomai/posix/corectl.c	1970-01-01 01:00:00.000000000 +0100
+/*
+ * Copyright (C) 2005 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+#ifndef _COBALT_POSIX_TIMER_H
+#define _COBALT_POSIX_TIMER_H
+
+#include <linux/types.h>
+#include <linux/time.h>
+#include <linux/list.h>
+#include <cobalt/kernel/timer.h>
+#include <xenomai/posix/signal.h>
+#include <xenomai/posix/syscall.h>
+
+struct cobalt_timer {
+	struct xntimer timerbase;
+	timer_t id;
+	int overruns;
+	clockid_t clockid;
+	pid_t target;
+	struct cobalt_sigpending sigp;
+	struct cobalt_extref extref;
+};
+
+int cobalt_timer_deliver(struct cobalt_thread *waiter,
+			 timer_t timerid);
+
+void cobalt_timer_reclaim(struct cobalt_process *p);
+
+static inline timer_t cobalt_timer_id(const struct cobalt_timer *timer)
+{
+	return timer->id;
+}
+
+struct cobalt_timer *
+cobalt_timer_by_id(struct cobalt_process *p, timer_t timer_id);
+
+void cobalt_timer_handler(struct xntimer *xntimer);
+
+void __cobalt_timer_getval(struct xntimer *__restrict__ timer, 
+			   struct itimerspec64 *__restrict__ value);
+
+int __cobalt_timer_setval(struct xntimer *__restrict__ timer, int clock_flag, 
+			  const struct itimerspec64 *__restrict__ value);
+
+int __cobalt_timer_create(clockid_t clock,
+			  const struct sigevent *sev,
+			  timer_t __user *u_tm);
+
+int __cobalt_timer_settime(timer_t timerid, int flags,
+			   const struct itimerspec64 *__restrict__ value,
+			   struct itimerspec64 *__restrict__ ovalue);
+
+int __cobalt_timer_gettime(timer_t timerid, struct itimerspec64 *value);
+
+COBALT_SYSCALL_DECL(timer_create,
+		    (clockid_t clock,
+		     const struct sigevent __user *u_sev,
+		     timer_t __user *u_tm));
+
+COBALT_SYSCALL_DECL(timer_delete, (timer_t tm));
+
+COBALT_SYSCALL_DECL(timer_settime,
+		    (timer_t tm, int flags,
+		     const struct __user_old_itimerspec __user *u_newval,
+		     struct __user_old_itimerspec __user *u_oldval));
+
+COBALT_SYSCALL_DECL(timer_gettime,
+		    (timer_t tm, struct __user_old_itimerspec __user *u_val));
+
+COBALT_SYSCALL_DECL(timer_getoverrun, (timer_t tm));
+
+#endif /* !_COBALT_POSIX_TIMER_H */
+++ linux-patched/kernel/xenomai/posix/corectl.c	2022-03-21 12:58:29.036892404 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/kernel/xenomai/posix/timerfd.c	1970-01-01 01:00:00.000000000 +0100
+/*
+ * Copyright (C) 2016 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+#include <linux/types.h>
+#include <linux/errno.h>
+#include <linux/kconfig.h>
+#include <linux/atomic.h>
+#include <linux/printk.h>
+#include <cobalt/kernel/init.h>
+#include <cobalt/kernel/thread.h>
+#include <xenomai/version.h>
+#include <pipeline/tick.h>
+#include <asm/xenomai/syscall.h>
+#include "corectl.h"
+
+static BLOCKING_NOTIFIER_HEAD(config_notifier_list);
+
+static int do_conf_option(int option, void __user *u_buf, size_t u_bufsz)
+{
+	struct cobalt_config_vector vec;
+	int ret, val = 0;
+
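+	/*
+	 * All built-in requests up to _CC_COBALT_GET_CORE_STATUS reply
+	 * with a single int value.
+	 */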
+	if (option <= _CC_COBALT_GET_CORE_STATUS && u_bufsz < sizeof(val))
+		return -EINVAL;
+
+	switch (option) {
+	case _CC_COBALT_GET_VERSION:
+		val = XENO_VERSION_CODE;
+		break;
+	case _CC_COBALT_GET_NR_PIPES:
+#ifdef CONFIG_XENO_OPT_PIPE
+		val = CONFIG_XENO_OPT_PIPE_NRDEV;
+#endif
+		break;
+	case _CC_COBALT_GET_NR_TIMERS:
+		val = CONFIG_XENO_OPT_NRTIMERS;
+		break;
+	case _CC_COBALT_GET_POLICIES:
+		val = _CC_COBALT_SCHED_FIFO|_CC_COBALT_SCHED_RR;
+		if (IS_ENABLED(CONFIG_XENO_OPT_SCHED_WEAK))
+			val |= _CC_COBALT_SCHED_WEAK;
+		if (IS_ENABLED(CONFIG_XENO_OPT_SCHED_SPORADIC))
+			val |= _CC_COBALT_SCHED_SPORADIC;
+		if (IS_ENABLED(CONFIG_XENO_OPT_SCHED_QUOTA))
+			val |= _CC_COBALT_SCHED_QUOTA;
+		if (IS_ENABLED(CONFIG_XENO_OPT_SCHED_TP))
+			val |= _CC_COBALT_SCHED_TP;
+		break;
+	case _CC_COBALT_GET_DEBUG:
+		if (IS_ENABLED(CONFIG_XENO_OPT_DEBUG_COBALT))
+			val |= _CC_COBALT_DEBUG_ASSERT;
+		if (IS_ENABLED(CONFIG_XENO_OPT_DEBUG_CONTEXT))
+			val |= _CC_COBALT_DEBUG_CONTEXT;
+		if (IS_ENABLED(CONFIG_XENO_OPT_DEBUG_LOCKING))
+			val |= _CC_COBALT_DEBUG_LOCKING;
+		if (IS_ENABLED(CONFIG_XENO_OPT_DEBUG_USER))
+			val |= _CC_COBALT_DEBUG_USER;
+		if (IS_ENABLED(CONFIG_XENO_OPT_DEBUG_MUTEX_RELAXED))
+			val |= _CC_COBALT_DEBUG_MUTEX_RELAXED;
+		if (IS_ENABLED(CONFIG_XENO_OPT_DEBUG_MUTEX_SLEEP))
+			val |= _CC_COBALT_DEBUG_MUTEX_SLEEP;
+		if (IS_ENABLED(CONFIG_XENO_OPT_DEBUG_LEGACY))
+			val |= _CC_COBALT_DEBUG_LEGACY;
+		if (IS_ENABLED(CONFIG_XENO_OPT_DEBUG_TRACE_RELAX))
+			val |= _CC_COBALT_DEBUG_TRACE_RELAX;
+		if (IS_ENABLED(CONFIG_XENO_DRIVERS_RTNET_CHECKED))
+			val |= _CC_COBALT_DEBUG_NET;
+		break;
+	case _CC_COBALT_GET_WATCHDOG:
+#ifdef CONFIG_XENO_OPT_WATCHDOG
+		val = CONFIG_XENO_OPT_WATCHDOG_TIMEOUT;
+#endif
+		break;
+	case _CC_COBALT_GET_CORE_STATUS:
+		val = realtime_core_state();
+		break;
+	default:
+		if (is_primary_domain())
+			/* Switch to secondary mode first. */
+			return -ENOSYS;
+		vec.u_buf = u_buf;
+		vec.u_bufsz = u_bufsz;
+		ret = blocking_notifier_call_chain(&config_notifier_list,
+						   option, &vec);
+		if (ret == NOTIFY_DONE)
+			return -EINVAL; /* Nobody cared. */
+		return notifier_to_errno(ret);
+	}
+
+	ret = cobalt_copy_to_user(u_buf, &val, sizeof(val));
+
+	return ret ? -EFAULT : 0;
+}
+
+static int stop_services(const void __user *u_buf, size_t u_bufsz)
+{
+	const u32 final_grace_period = 3; /* seconds */
+	enum cobalt_run_states state;
+	__u32 grace_period;
+	int ret;
+
+	/*
+	 * XXX: we don't have any syscall for unbinding a thread from
+	 * the Cobalt core, so we prevent real-time threads from
+	 * stopping Cobalt services, i.e. _CC_COBALT_STOP_CORE must be
+	 * issued from a plain, regular Linux thread.
+	 */
+	if (xnthread_current())
+		return -EPERM;
+
+	if (u_bufsz != sizeof(__u32))
+		return -EINVAL;
+
+	ret = cobalt_copy_from_user(&grace_period,
+				    u_buf, sizeof(grace_period));
+	if (ret)
+		return ret;
+
+	state = atomic_cmpxchg(&cobalt_runstate,
+			       COBALT_STATE_RUNNING,
+			       COBALT_STATE_TEARDOWN);
+	switch (state) {
+	case COBALT_STATE_STOPPED:
+		break;
+	case COBALT_STATE_RUNNING:
+		/* Kill user threads. */
+		ret = xnthread_killall(grace_period, XNUSER);
+		if (ret) {
+			set_realtime_core_state(state);
+			return ret;
+		}
+		cobalt_call_state_chain(COBALT_STATE_TEARDOWN);
+		/* Kill lingering RTDM tasks. */
+		ret = xnthread_killall(final_grace_period, 0);
+		if (ret == -EAGAIN)
+			printk(XENO_WARNING "some RTDM tasks won't stop\n");
+		pipeline_uninstall_tick_proxy();
+		set_realtime_core_state(COBALT_STATE_STOPPED);
+		printk(XENO_INFO "services stopped\n");
+		break;
+	default:
+		ret = -EINPROGRESS;
+	}
+
+	return ret;
+}
+
+static int start_services(void)
+{
+	enum cobalt_run_states state;
+	int ret = 0;
+
+	state = atomic_cmpxchg(&cobalt_runstate,
+			       COBALT_STATE_STOPPED,
+			       COBALT_STATE_WARMUP);
+	switch (state) {
+	case COBALT_STATE_RUNNING:
+		break;
+	case COBALT_STATE_STOPPED:
+		pipeline_install_tick_proxy();
+		cobalt_call_state_chain(COBALT_STATE_WARMUP);
+		set_realtime_core_state(COBALT_STATE_RUNNING);
+		printk(XENO_INFO "services started\n");
+		break;
+	default:
+		ret = -EINPROGRESS;
+	}
+
+	return ret;
+}
+
+COBALT_SYSCALL(corectl, probing,
+	       (int request, void __user *u_buf, size_t u_bufsz))
+{
+	int ret;
+
+	switch (request) {
+	case _CC_COBALT_STOP_CORE:
+		ret = stop_services(u_buf, u_bufsz);
+		break;
+	case _CC_COBALT_START_CORE:
+		ret = start_services();
+		break;
+	default:
+		ret = do_conf_option(request, u_buf, u_bufsz);
+	}
+
+	return ret;
+}
+
+void cobalt_add_config_chain(struct notifier_block *nb)
+{
+	blocking_notifier_chain_register(&config_notifier_list, nb);
+}
+EXPORT_SYMBOL_GPL(cobalt_add_config_chain);
+
+void cobalt_remove_config_chain(struct notifier_block *nb)
+{
+	blocking_notifier_chain_unregister(&config_notifier_list, nb);
+}
+EXPORT_SYMBOL_GPL(cobalt_remove_config_chain);
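+
+/*
+ * Illustrative sketch (not part of this patch): an out-of-tree driver
+ * could answer a custom corectl request through the config notifier
+ * chain, registering its notifier block from its init code. The
+ * request code _CC_MYDRV_GET_FOO and the mydrv_* names below are
+ * hypothetical. Returning NOTIFY_STOP reports success and stops the
+ * chain, while NOTIFY_DONE lets other handlers have a look.
+ *
+ *	static int mydrv_corectl(struct notifier_block *nb,
+ *				 unsigned long option, void *data)
+ *	{
+ *		struct cobalt_config_vector *vec = data;
+ *		int foo = 42;
+ *
+ *		if (option != _CC_MYDRV_GET_FOO)
+ *			return NOTIFY_DONE;
+ *
+ *		if (vec->u_bufsz < sizeof(foo))
+ *			return notifier_from_errno(-EINVAL);
+ *
+ *		if (cobalt_copy_to_user(vec->u_buf, &foo, sizeof(foo)))
+ *			return notifier_from_errno(-EFAULT);
+ *
+ *		return NOTIFY_STOP;
+ *	}
+ *
+ *	static struct notifier_block mydrv_nb = {
+ *		.notifier_call = mydrv_corectl,
+ *	};
+ *
+ *	cobalt_add_config_chain(&mydrv_nb);
+ */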
+++ linux-patched/kernel/xenomai/posix/timerfd.c	2022-03-21 12:58:29.033892433 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/kernel/xenomai/posix/cond.c	1970-01-01 01:00:00.000000000 +0100
+/*
+ * Copyright (C) 2013 Gilles Chanteperdrix <gilles.chanteperdrix@xenomai.org>.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+#include <linux/timerfd.h>
+#include <linux/err.h>
+#include <cobalt/kernel/timer.h>
+#include <cobalt/kernel/select.h>
+#include <rtdm/fd.h>
+#include "internal.h"
+#include "clock.h"
+#include "timer.h"
+#include "timerfd.h"
+
+struct cobalt_tfd {
+	int flags;
+	clockid_t clockid;
+	struct rtdm_fd fd;
+	struct xntimer timer;
+	DECLARE_XNSELECT(read_select);
+	struct itimerspec64 value;
+	struct xnsynch readers;
+	struct xnthread *target;
+};
+
+#define COBALT_TFD_TICKED	(1 << 2)
+
+#define COBALT_TFD_SETTIME_FLAGS (TFD_TIMER_ABSTIME | TFD_WAKEUP)
+
+static ssize_t timerfd_read(struct rtdm_fd *fd, void __user *buf, size_t size)
+{
+	struct cobalt_tfd *tfd;
+	__u64 __user *u_ticks;
+	__u64 ticks = 0;
+	bool aligned;
+	spl_t s;
+	int err;
+
+	if (size < sizeof(ticks))
+		return -EINVAL;
+
+	u_ticks = buf;
+	if (!access_wok(u_ticks, sizeof(*u_ticks)))
+		return -EFAULT;
+
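+	/*
+	 * Use the fast single-word transfer only if the destination
+	 * buffer is naturally aligned.
+	 */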
+	aligned = (((unsigned long)buf) & (sizeof(ticks) - 1)) == 0;
+
+	tfd = container_of(fd, struct cobalt_tfd, fd);
+
+	xnlock_get_irqsave(&nklock, s);
+	if (tfd->flags & COBALT_TFD_TICKED) {
+		err = 0;
+		goto out;
+	}
+	if (rtdm_fd_flags(fd) & O_NONBLOCK) {
+		err = -EAGAIN;
+		goto out;
+	}
+
+	do {
+		err = xnsynch_sleep_on(&tfd->readers, XN_INFINITE, XN_RELATIVE);
+	} while (err == 0 && (tfd->flags & COBALT_TFD_TICKED) == 0);
+
+	if (err & XNBREAK)
+		err = -EINTR;
+  out:
+	if (err == 0) {
+		xnticks_t now;
+
+		if (xntimer_periodic_p(&tfd->timer)) {
+			now = xnclock_read_raw(xntimer_clock(&tfd->timer));
+			ticks = 1 + xntimer_get_overruns(&tfd->timer,
+					 xnthread_current(), now);
+		} else
+			ticks = 1;
+
+		tfd->flags &= ~COBALT_TFD_TICKED;
+		xnselect_signal(&tfd->read_select, 0);
+	}
+	xnlock_put_irqrestore(&nklock, s);
+
+	if (err == 0) {
+		err = aligned ? __xn_put_user(ticks, u_ticks) :
+			__xn_copy_to_user(buf, &ticks, sizeof(ticks));
+		if (err)
+			err = -EFAULT;
+	}
+
+	return err ?: sizeof(ticks);
+}
+
+static int
+timerfd_select(struct rtdm_fd *fd, struct xnselector *selector,
+	       unsigned type, unsigned index)
+{
+	struct cobalt_tfd *tfd = container_of(fd, struct cobalt_tfd, fd);
+	struct xnselect_binding *binding;
+	spl_t s;
+	int err;
+
+	if (type != XNSELECT_READ)
+		return -EBADF;
+
+	binding = xnmalloc(sizeof(*binding));
+	if (binding == NULL)
+		return -ENOMEM;
+
+	xnlock_get_irqsave(&nklock, s);
+	xntimer_set_affinity(&tfd->timer, xnthread_current()->sched);
+	err = xnselect_bind(&tfd->read_select, binding, selector, type,
+			index, tfd->flags & COBALT_TFD_TICKED);
+	xnlock_put_irqrestore(&nklock, s);
+
+	return err;
+}
+
+static void timerfd_close(struct rtdm_fd *fd)
+{
+	struct cobalt_tfd *tfd = container_of(fd, struct cobalt_tfd, fd);
+	spl_t s;
+
+	xnlock_get_irqsave(&nklock, s);
+	xntimer_destroy(&tfd->timer);
+	xnsynch_destroy(&tfd->readers);
+	xnsched_run();
+	xnlock_put_irqrestore(&nklock, s);
+	xnselect_destroy(&tfd->read_select); /* Reschedules. */
+	xnfree(tfd);
+}
+
+static struct rtdm_fd_ops timerfd_ops = {
+	.read_rt = timerfd_read,
+	.select = timerfd_select,
+	.close = timerfd_close,
+};
+
+static void timerfd_handler(struct xntimer *xntimer)
+{
+	struct cobalt_tfd *tfd;
+
+	tfd = container_of(xntimer, struct cobalt_tfd, timer);
+	tfd->flags |= COBALT_TFD_TICKED;
+	xnselect_signal(&tfd->read_select, 1);
+	xnsynch_wakeup_one_sleeper(&tfd->readers);
+	if (tfd->target)
+		xnthread_unblock(tfd->target);
+}
+
+COBALT_SYSCALL(timerfd_create, lostage, (int clockid, int flags))
+{
+	struct cobalt_tfd *tfd;
+	struct xnthread *curr;
+	struct xnclock *clock;
+	int ret, ufd;
+
+	if (flags & ~TFD_CREATE_FLAGS)
+		return -EINVAL;
+
+	clock = cobalt_clock_find(clockid);
+	if (IS_ERR(clock))
+		return PTR_ERR(clock);
+
+	tfd = xnmalloc(sizeof(*tfd));
+	if (tfd == NULL)
+		return -ENOMEM;
+
+	ufd = __rtdm_anon_getfd("[cobalt-timerfd]",
+				O_RDWR | (flags & TFD_SHARED_FCNTL_FLAGS));
+	if (ufd < 0) {
+		ret = ufd;
+		goto fail_getfd;
+	}
+
+	tfd->flags = flags & ~TFD_NONBLOCK;
+	tfd->fd.oflags = (flags & TFD_NONBLOCK) ? O_NONBLOCK : 0;
+	tfd->clockid = clockid;
+	curr = xnthread_current();
+	xntimer_init(&tfd->timer, clock, timerfd_handler,
+		     curr ? curr->sched : NULL, XNTIMER_UGRAVITY);
+	xnsynch_init(&tfd->readers, XNSYNCH_PRIO, NULL);
+	xnselect_init(&tfd->read_select);
+	tfd->target = NULL;
+
+	ret = rtdm_fd_enter(&tfd->fd, ufd, COBALT_TIMERFD_MAGIC, &timerfd_ops);
+	if (ret < 0)
+		goto fail;
+
+	ret = rtdm_fd_register(&tfd->fd, ufd);
+	if (ret < 0)
+		goto fail;
+
+	return ufd;
+fail:
+	xnselect_destroy(&tfd->read_select);
+	xnsynch_destroy(&tfd->readers);
+	xntimer_destroy(&tfd->timer);
+	__rtdm_anon_putfd(ufd);
+fail_getfd:
+	xnfree(tfd);
+
+	return ret;
+}
+
+static inline struct cobalt_tfd *tfd_get(int ufd)
+{
+	struct rtdm_fd *fd;
+
+	fd = rtdm_fd_get(ufd, COBALT_TIMERFD_MAGIC);
+	if (IS_ERR(fd)) {
+		int err = PTR_ERR(fd);
+		if (err == -EBADF && cobalt_current_process() == NULL)
+			err = -EPERM;
+		return ERR_PTR(err);
+	}
+
+	return container_of(fd, struct cobalt_tfd, fd);
+}
+
+static inline void tfd_put(struct cobalt_tfd *tfd)
+{
+	rtdm_fd_put(&tfd->fd);
+}
+
+int __cobalt_timerfd_settime(int fd, int flags,
+			     const struct itimerspec64 *value,
+			     struct itimerspec64 *ovalue)
+{
+	struct cobalt_tfd *tfd;
+	int cflag, ret;
+	spl_t s;
+
+	if (flags & ~COBALT_TFD_SETTIME_FLAGS)
+		return -EINVAL;
+
+	tfd = tfd_get(fd);
+	if (IS_ERR(tfd))
+		return PTR_ERR(tfd);
+
+	cflag = (flags & TFD_TIMER_ABSTIME) ? TIMER_ABSTIME : 0;
+
+	xnlock_get_irqsave(&nklock, s);
+
+	tfd->target = NULL;
+	if (flags & TFD_WAKEUP) {
+		tfd->target = xnthread_current();
+		if (tfd->target == NULL) {
+			ret = -EPERM;
+			goto out;
+		}
+	}
+
+	if (ovalue)
+		__cobalt_timer_getval(&tfd->timer, ovalue);
+
+	xntimer_set_affinity(&tfd->timer, xnthread_current()->sched);
+
+	ret = __cobalt_timer_setval(&tfd->timer,
+				    clock_flag(cflag, tfd->clockid), value);
+out:
+	xnlock_put_irqrestore(&nklock, s);
+
+	tfd_put(tfd);
+
+	return ret;
+}
+
+COBALT_SYSCALL(timerfd_settime, primary,
+	       (int fd, int flags,
+		const struct __user_old_itimerspec __user *new_value,
+		struct __user_old_itimerspec __user *old_value))
+{
+	struct itimerspec64 ovalue, value;
+	int ret;
+
+	ret = cobalt_get_u_itimerspec(&value, new_value);
+	if (ret)
+		return ret;
+
+	ret = __cobalt_timerfd_settime(fd, flags, &value, &ovalue);
+	if (ret)
+		return ret;
+
+	if (old_value) {
+		ret = cobalt_copy_to_user(old_value, &ovalue, sizeof(ovalue));
+		if (ret) {
+			/* Roll back: disarm the timer we just set. */
+			value.it_value.tv_sec = 0;
+			value.it_value.tv_nsec = 0;
+			__cobalt_timerfd_settime(fd, flags, &value, NULL);
+		}
+	}
+
+	return ret;
+}
+
+int __cobalt_timerfd_gettime(int fd, struct itimerspec64 *value)
+{
+	struct cobalt_tfd *tfd;
+	spl_t s;
+
+	tfd = tfd_get(fd);
+	if (IS_ERR(tfd))
+		return PTR_ERR(tfd);
+
+	xnlock_get_irqsave(&nklock, s);
+	__cobalt_timer_getval(&tfd->timer, value);
+	xnlock_put_irqrestore(&nklock, s);
+
+	tfd_put(tfd);
+
+	return 0;
+}
+
+COBALT_SYSCALL(timerfd_gettime, current,
+	       (int fd, struct __user_old_itimerspec __user *curr_value))
+{
+	struct itimerspec64 value;
+	int ret;
+
+	ret = __cobalt_timerfd_gettime(fd, &value);
+
+	return ret ?: cobalt_put_u_itimerspec(curr_value, &value);
+}
+++ linux-patched/kernel/xenomai/posix/cond.c	2022-03-21 12:58:29.029892472 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/kernel/xenomai/posix/syscall.h	1970-01-01 01:00:00.000000000 +0100
+/*
+ * Written by Gilles Chanteperdrix <gilles.chanteperdrix@xenomai.org>.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+#include "internal.h"
+#include "thread.h"
+#include "mutex.h"
+#include "cond.h"
+#include "clock.h"
+#include <trace/events/cobalt-posix.h>
+
+static inline int
+pthread_cond_init(struct cobalt_cond_shadow *cnd, const struct cobalt_condattr *attr)
+{
+	int synch_flags = XNSYNCH_PRIO, ret;
+	struct cobalt_cond *cond, *old_cond;
+	struct cobalt_cond_state *state;
+	struct cobalt_ppd *sys_ppd;
+	struct list_head *condq;
+	spl_t s;
+
+	cond = xnmalloc(sizeof(*cond));
+	if (cond == NULL)
+		return -ENOMEM;
+
+	sys_ppd = cobalt_ppd_get(attr->pshared);
+	state = cobalt_umm_alloc(&sys_ppd->umm, sizeof(*state));
+	if (state == NULL) {
+		ret = -EAGAIN;
+		goto fail_umm;
+	}
+	cond->state = state;
+	state->pending_signals = 0;
+	state->mutex_state_offset = ~0U;
+
+	xnlock_get_irqsave(&nklock, s);
+
+	condq = &cobalt_current_resources(attr->pshared)->condq;
+	if (cnd->magic == COBALT_COND_MAGIC && !list_empty(condq)) {
+		old_cond = xnregistry_lookup(cnd->handle, NULL);
+		if (cobalt_obj_active(old_cond, COBALT_COND_MAGIC,
+				      typeof(*old_cond))) {
+			ret = -EBUSY;
+			goto fail_register;
+		}
+	}
+
+	ret = xnregistry_enter_anon(cond, &cond->resnode.handle);
+	if (ret < 0)
+		goto fail_register;
+	if (attr->pshared)
+		cond->resnode.handle |= XNSYNCH_PSHARED;
+	cond->magic = COBALT_COND_MAGIC;
+	xnsynch_init(&cond->synchbase, synch_flags, NULL);
+	cond->attr = *attr;
+	cond->mutex = NULL;
+	cobalt_add_resource(&cond->resnode, cond, attr->pshared);
+
+	cnd->handle = cond->resnode.handle;
+	cnd->state_offset = cobalt_umm_offset(&sys_ppd->umm, state);
+	cnd->magic = COBALT_COND_MAGIC;
+
+	xnlock_put_irqrestore(&nklock, s);
+
+	return 0;
+fail_register:
+	xnlock_put_irqrestore(&nklock, s);
+	cobalt_umm_free(&sys_ppd->umm, state);
+fail_umm:
+	xnfree(cond);
+
+	return ret;
+}
+
+static inline int pthread_cond_destroy(struct cobalt_cond_shadow *cnd)
+{
+	struct cobalt_cond *cond;
+	spl_t s;
+
+	xnlock_get_irqsave(&nklock, s);
+	cond = xnregistry_lookup(cnd->handle, NULL);
+	if (cond == NULL) {
+		xnlock_put_irqrestore(&nklock, s);
+		return -EINVAL;
+	}
+
+	if (!cobalt_obj_active(cnd, COBALT_COND_MAGIC, struct cobalt_cond_shadow)
+	    || !cobalt_obj_active(cond, COBALT_COND_MAGIC, struct cobalt_cond)) {
+		xnlock_put_irqrestore(&nklock, s);
+		return -EINVAL;
+	}
+
+	if (cond->resnode.scope !=
+	    cobalt_current_resources(cond->attr.pshared)) {
+		xnlock_put_irqrestore(&nklock, s);
+		return -EPERM;
+	}
+
+	if (xnsynch_pended_p(&cond->synchbase) || cond->mutex) {
+		xnlock_put_irqrestore(&nklock, s);
+		return -EBUSY;
+	}
+
+	cobalt_cond_reclaim(&cond->resnode, s); /* drops lock */
+
+	cobalt_mark_deleted(cnd);
+
+	return 0;
+}
+
+static inline int cobalt_cond_timedwait_prologue(struct xnthread *cur,
+						 struct cobalt_cond *cond,
+						 struct cobalt_mutex *mutex,
+						 xnticks_t abs_to)
+{
+	int err, ret;
+	spl_t s;
+
+	xnlock_get_irqsave(&nklock, s);
+
+	/*
+	 * Reject the wait if another thread is already pending on
+	 * this condition variable with a different mutex.
+	 */
+	if (!cobalt_obj_active(cond, COBALT_COND_MAGIC, struct cobalt_cond)
+	    || (cond->mutex && cond->mutex != mutex)) {
+		err = -EINVAL;
+		goto unlock_and_return;
+	}
+
+	if (cond->resnode.scope !=
+	    cobalt_current_resources(cond->attr.pshared)) {
+		err = -EPERM;
+		goto unlock_and_return;
+	}
+
+	if (mutex->attr.pshared != cond->attr.pshared) {
+		err = -EINVAL;
+		goto unlock_and_return;
+	}
+
+	/* Unlock mutex. */
+	err = cobalt_mutex_release(cur, mutex);
+	if (err < 0)
+		goto unlock_and_return;
+
+	/* err == 1 means a reschedule is needed, but do not
+	   reschedule here: releasing the mutex and suspending must be
+	   done atomically in pthread_cond_*wait. */
+
+	/* Bind mutex to cond. */
+	if (cond->mutex == NULL) {
+		cond->mutex = mutex;
+		list_add_tail(&cond->mutex_link, &mutex->conds);
+	}
+
+	/* Wait for another thread to signal the condition. */
+	if (abs_to != XN_INFINITE)
+		ret = xnsynch_sleep_on(&cond->synchbase, abs_to,
+				       clock_flag(TIMER_ABSTIME, cond->attr.clock));
+	else
+		ret = xnsynch_sleep_on(&cond->synchbase, XN_INFINITE, XN_RELATIVE);
+
+	/* There are three possible wakeup conditions:
+	   - cond_signal / cond_broadcast: no status bit is set, and the
+	     function should return 0;
+	   - timeout: the XNTIMEO status bit is set, and the function
+	     should return ETIMEDOUT;
+	   - pthread_kill: the XNBREAK status bit is set but ignored, and
+	     the function simply returns EINTR (used only by the
+	     user-space interface, replaced by 0 anywhere else); the
+	     resulting wakeup may or may not be spurious, depending on
+	     whether pthread_cond_signal was called between pthread_kill
+	     and the moment xnsynch_sleep_on returned.
+	 */
+
+	err = 0;
+
+	if (ret & XNBREAK)
+		err = -EINTR;
+	else if (ret & XNTIMEO)
+		err = -ETIMEDOUT;
+
+unlock_and_return:
+	xnlock_put_irqrestore(&nklock, s);
+
+	return err;
+}
+
+static inline int cobalt_cond_timedwait_epilogue(struct xnthread *cur,
+						 struct cobalt_cond *cond,
+						 struct cobalt_mutex *mutex)
+{
+	int err;
+	spl_t s;
+
+	xnlock_get_irqsave(&nklock, s);
+
+	err = __cobalt_mutex_acquire_unchecked(cur, mutex, NULL);
+	if (err == -EINTR)
+		goto unlock_and_return;
+
+	/*
+	 * Unbind mutex and cond, if no other thread is waiting, if
+	 * the job was not already done.
+	 */
+	if (!xnsynch_pended_p(&cond->synchbase) && cond->mutex == mutex) {
+		cond->mutex = NULL;
+		list_del(&cond->mutex_link);
+	}
+
+unlock_and_return:
+	xnlock_put_irqrestore(&nklock, s);
+
+	return err;
+}
+
+COBALT_SYSCALL(cond_init, current,
+	       (struct cobalt_cond_shadow __user *u_cnd,
+		const struct cobalt_condattr __user *u_attr))
+{
+	struct cobalt_cond_shadow cnd;
+	struct cobalt_condattr attr;
+	int err;
+
+	if (cobalt_copy_from_user(&cnd, u_cnd, sizeof(cnd)))
+		return -EFAULT;
+
+	if (cobalt_copy_from_user(&attr, u_attr, sizeof(attr)))
+		return -EFAULT;
+
+	trace_cobalt_cond_init(u_cnd, &attr);
+
+	err = pthread_cond_init(&cnd, &attr);
+	if (err < 0)
+		return err;
+
+	return cobalt_copy_to_user(u_cnd, &cnd, sizeof(*u_cnd));
+}
+
+COBALT_SYSCALL(cond_destroy, current,
+	       (struct cobalt_cond_shadow __user *u_cnd))
+{
+	struct cobalt_cond_shadow cnd;
+	int err;
+
+	if (cobalt_copy_from_user(&cnd, u_cnd, sizeof(cnd)))
+		return -EFAULT;
+
+	trace_cobalt_cond_destroy(u_cnd);
+
+	err = pthread_cond_destroy(&cnd);
+	if (err < 0)
+		return err;
+
+	return cobalt_copy_to_user(u_cnd, &cnd, sizeof(*u_cnd));
+}
+
+struct us_cond_data {
+	int err;
+};
+
+static inline int cond_fetch_timeout(struct timespec64 *ts,
+				     const void __user *u_ts)
+{
+	return u_ts == NULL ? -EFAULT : cobalt_get_u_timespec(ts, u_ts);
+}
+
+int __cobalt_cond_wait_prologue(struct cobalt_cond_shadow __user *u_cnd,
+				struct cobalt_mutex_shadow __user *u_mx,
+				int *u_err,
+				void __user *u_ts,
+				int (*fetch_timeout)(struct timespec64 *ts,
+						     const void __user *u_ts))
+{
+	struct xnthread *cur = xnthread_current();
+	struct cobalt_cond *cond;
+	struct cobalt_mutex *mx;
+	struct us_cond_data d;
+	struct timespec64 ts;
+	xnhandle_t handle;
+	int err, perr = 0;
+	__u32 offset;
+
+	handle = cobalt_get_handle_from_user(&u_cnd->handle);
+	cond = xnregistry_lookup(handle, NULL);
+
+	handle = cobalt_get_handle_from_user(&u_mx->handle);
+	mx = xnregistry_lookup(handle, NULL);
+
+	if (cond->mutex == NULL) {
+		__xn_get_user(offset, &u_mx->state_offset);
+		cond->state->mutex_state_offset = offset;
+	}
+
+	if (fetch_timeout) {
+		err = fetch_timeout(&ts, u_ts);
+		if (err == 0) {
+			trace_cobalt_cond_timedwait(u_cnd, u_mx, &ts);
+			err = cobalt_cond_timedwait_prologue(cur, cond, mx,
+							     ts2ns(&ts) + 1);
+		}
+	} else {
+		trace_cobalt_cond_wait(u_cnd, u_mx);
+		err = cobalt_cond_timedwait_prologue(cur, cond, mx, XN_INFINITE);
+	}
+
+	switch (err) {
+	case 0:
+	case -ETIMEDOUT:
+		perr = d.err = err;
+		err = cobalt_cond_timedwait_epilogue(cur, cond, mx);
+		break;
+
+	case -EINTR:
+		perr = err;
+		d.err = 0;	/* epilogue should return 0. */
+		break;
+
+	default:
+		/* Keep gcc happy and handle the case which will
+		   never happen. */
+		d.err = EINVAL;
+	}
+
+	if (cond->mutex == NULL)
+		cond->state->mutex_state_offset = ~0U;
+
+	if (err == -EINTR)
+		__xn_put_user(d.err, u_err);
+
+	return err == 0 ? perr : err;
+}
+
+/* pthread_cond_wait_prologue(cond, mutex, count_ptr, timed, timeout) */
+COBALT_SYSCALL(cond_wait_prologue, nonrestartable,
+	       (struct cobalt_cond_shadow __user *u_cnd,
+		struct cobalt_mutex_shadow __user *u_mx,
+		int *u_err,
+		unsigned int timed,
+		struct __user_old_timespec __user *u_ts))
+{
+	return __cobalt_cond_wait_prologue(u_cnd, u_mx, u_err, u_ts,
+					   timed ? cond_fetch_timeout : NULL);
+}
+
+COBALT_SYSCALL(cond_wait_epilogue, primary,
+	       (struct cobalt_cond_shadow __user *u_cnd,
+		struct cobalt_mutex_shadow __user *u_mx))
+{
+	struct xnthread *cur = xnthread_current();
+	struct cobalt_cond *cond;
+	struct cobalt_mutex *mx;
+	xnhandle_t handle;
+	int err;
+
+	handle = cobalt_get_handle_from_user(&u_cnd->handle);
+	cond = xnregistry_lookup(handle, NULL);
+
+	handle = cobalt_get_handle_from_user(&u_mx->handle);
+	mx = xnregistry_lookup(handle, NULL);
+	err = cobalt_cond_timedwait_epilogue(cur, cond, mx);
+
+	if (cond->mutex == NULL)
+		cond->state->mutex_state_offset = ~0U;
+
+	return err;
+}
+
+int cobalt_cond_deferred_signals(struct cobalt_cond *cond)
+{
+	struct cobalt_cond_state *state;
+	__u32 pending_signals;
+	int need_resched;
+
+	state = cond->state;
+	pending_signals = state->pending_signals;
+
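+	/*
+	 * pending_signals counts the wakeups deferred on this condvar
+	 * since the last flush, ~0U standing for a broadcast: wake up
+	 * that many sleepers, flush every waiter, or do nothing for 0.
+	 */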
+	switch (pending_signals) {
+	default:
+		state->pending_signals = 0;
+		need_resched = xnsynch_wakeup_many_sleepers(&cond->synchbase,
+							    pending_signals);
+		break;
+
+	case ~0U:
+		need_resched =
+			xnsynch_flush(&cond->synchbase, 0) == XNSYNCH_RESCHED;
+		state->pending_signals = 0;
+		break;
+
+	case 0:
+		need_resched = 0;
+		break;
+	}
+
+	return need_resched;
+}
+
+void cobalt_cond_reclaim(struct cobalt_resnode *node, spl_t s)
+{
+	struct cobalt_cond *cond;
+
+	cond = container_of(node, struct cobalt_cond, resnode);
+	xnregistry_remove(node->handle);
+	cobalt_del_resource(node);
+	xnsynch_destroy(&cond->synchbase);
+	cobalt_mark_deleted(cond);
+	xnlock_put_irqrestore(&nklock, s);
+
+	cobalt_umm_free(&cobalt_ppd_get(cond->attr.pshared)->umm,
+			cond->state);
+	xnfree(cond);
+}
+++ linux-patched/kernel/xenomai/posix/syscall.h	2022-03-21 12:58:29.026892501 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/kernel/xenomai/posix/process.c	1970-01-01 01:00:00.000000000 +0100
+/*
+ * Copyright (C) 2014 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+#ifndef _COBALT_POSIX_SYSCALL_H
+#define _COBALT_POSIX_SYSCALL_H
+
+#include <cobalt/uapi/syscall.h>
+
+struct pt_regs;
+
+/* Regular (native) syscall handler implementation. */
+#define COBALT_SYSCALL(__name, __mode, __args)	\
+	long CoBaLt_ ## __name __args
+
+/* Regular (native) syscall handler declaration. */
+#define COBALT_SYSCALL_DECL(__name, __args)	\
+	long CoBaLt_ ## __name __args
+
+#include <asm/xenomai/syscall32.h>
+
+int handle_head_syscall(bool caller_is_relaxed,
+			struct pt_regs *regs);
+
+int handle_root_syscall(struct pt_regs *regs);
+
+#endif /* !_COBALT_POSIX_SYSCALL_H */
+++ linux-patched/kernel/xenomai/posix/process.c	2022-03-21 12:58:29.022892540 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/kernel/xenomai/posix/nsem.c	1970-01-01 01:00:00.000000000 +0100
+/*
+ * Copyright (C) 2001-2014 Philippe Gerum <rpm@xenomai.org>.
+ * Copyright (C) 2001-2014 The Xenomai project <http://www.xenomai.org>
+ * Copyright (C) 2006 Gilles Chanteperdrix <gilles.chanteperdrix@xenomai.org>
+ *
+ * SMP support Copyright (C) 2004 The HYADES project <http://www.hyades-itea.org>
+ * RTAI/fusion Copyright (C) 2004 The RTAI project <http://www.rtai.org>
+ *
+ * Xenomai is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published
+ * by the Free Software Foundation; either version 2 of the License,
+ * or (at your option) any later version.
+ *
+ * Xenomai is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Xenomai; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA
+ * 02111-1307, USA.
+ */
+#include <linux/stdarg.h>
+#include <linux/unistd.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/fs.h>
+#include <linux/anon_inodes.h>
+#include <linux/mman.h>
+#include <linux/mm.h>
+#include <linux/slab.h>
+#include <linux/cred.h>
+#include <linux/file.h>
+#include <linux/sched.h>
+#include <linux/signal.h>
+#include <pipeline/kevents.h>
+#include <cobalt/kernel/sched.h>
+#include <cobalt/kernel/heap.h>
+#include <cobalt/kernel/synch.h>
+#include <cobalt/kernel/clock.h>
+#include <cobalt/kernel/ppd.h>
+#include <cobalt/kernel/trace.h>
+#include <cobalt/kernel/stat.h>
+#include <cobalt/kernel/ppd.h>
+#include <cobalt/kernel/vdso.h>
+#include <cobalt/kernel/thread.h>
+#include <cobalt/uapi/signal.h>
+#include <cobalt/uapi/syscall.h>
+#include <pipeline/sched.h>
+#include <trace/events/cobalt-core.h>
+#include <rtdm/driver.h>
+#include <asm/xenomai/features.h>
+#include <asm/xenomai/syscall.h>
+#include "../debug.h"
+#include "internal.h"
+#include "thread.h"
+#include "sched.h"
+#include "mutex.h"
+#include "cond.h"
+#include "mqueue.h"
+#include "sem.h"
+#include "signal.h"
+#include "timer.h"
+#include "monitor.h"
+#include "clock.h"
+#include "event.h"
+#include "timerfd.h"
+#include "io.h"
+
+static int gid_arg = -1;
+module_param_named(allowed_group, gid_arg, int, 0644);
+
+static DEFINE_MUTEX(personality_lock);
+
+static struct hlist_head *process_hash;
+DEFINE_PRIVATE_XNLOCK(process_hash_lock);
+#define PROCESS_HASH_SIZE 13
+
+struct xnthread_personality *cobalt_personalities[NR_PERSONALITIES];
+
+static struct xnsynch yield_sync;
+
+LIST_HEAD(cobalt_global_thread_list);
+
+DEFINE_XNPTREE(posix_ptree, "posix");
+
+struct cobalt_resources cobalt_global_resources = {
+	.condq = LIST_HEAD_INIT(cobalt_global_resources.condq),
+	.mutexq = LIST_HEAD_INIT(cobalt_global_resources.mutexq),
+	.semq = LIST_HEAD_INIT(cobalt_global_resources.semq),
+	.monitorq = LIST_HEAD_INIT(cobalt_global_resources.monitorq),
+	.eventq = LIST_HEAD_INIT(cobalt_global_resources.eventq),
+	.schedq = LIST_HEAD_INIT(cobalt_global_resources.schedq),
+};
+
+static unsigned __attribute__((pure)) process_hash_crunch(struct mm_struct *mm)
+{
+	unsigned long hash = ((unsigned long)mm - PAGE_OFFSET) / sizeof(*mm);
+	return hash % PROCESS_HASH_SIZE;
+}
+
+static struct cobalt_process *__process_hash_search(struct mm_struct *mm)
+{
+	unsigned int bucket = process_hash_crunch(mm);
+	struct cobalt_process *p;
+
+	hlist_for_each_entry(p, &process_hash[bucket], hlink)
+		if (p->mm == mm)
+			return p;
+
+	return NULL;
+}
+
+static int process_hash_enter(struct cobalt_process *p)
+{
+	struct mm_struct *mm = current->mm;
+	unsigned int bucket = process_hash_crunch(mm);
+	int err;
+	spl_t s;
+
+	xnlock_get_irqsave(&process_hash_lock, s);
+	if (__process_hash_search(mm)) {
+		err = -EBUSY;
+		goto out;
+	}
+
+	p->mm = mm;
+	hlist_add_head(&p->hlink, &process_hash[bucket]);
+	err = 0;
+  out:
+	xnlock_put_irqrestore(&process_hash_lock, s);
+	return err;
+}
+
+static void process_hash_remove(struct cobalt_process *p)
+{
+	spl_t s;
+
+	xnlock_get_irqsave(&process_hash_lock, s);
+	if (p->mm)
+		hlist_del(&p->hlink);
+	xnlock_put_irqrestore(&process_hash_lock, s);
+}
+
+struct cobalt_process *cobalt_search_process(struct mm_struct *mm)
+{
+	struct cobalt_process *process;
+	spl_t s;
+
+	xnlock_get_irqsave(&process_hash_lock, s);
+	process = __process_hash_search(mm);
+	xnlock_put_irqrestore(&process_hash_lock, s);
+
+	return process;
+}
+
+static void *lookup_context(int xid)
+{
+	struct cobalt_process *process = cobalt_current_process();
+	void *priv = NULL;
+	spl_t s;
+
+	xnlock_get_irqsave(&process_hash_lock, s);
+	/*
+	 * First try matching the process context attached to the
+	 * (usually main) thread which issued sc_cobalt_bind. If not
+	 * found, try matching by mm context, which should point us
+	 * back to the latter. If none match, then the current process
+	 * is unbound.
+	 */
+	if (process == NULL && current->mm)
+		process = __process_hash_search(current->mm);
+	if (process)
+		priv = process->priv[xid];
+
+	xnlock_put_irqrestore(&process_hash_lock, s);
+
+	return priv;
+}
+
+void cobalt_remove_process(struct cobalt_process *process)
+{
+	struct xnthread_personality *personality;
+	void *priv;
+	int xid;
+
+	mutex_lock(&personality_lock);
+
+	for (xid = NR_PERSONALITIES - 1; xid >= 0; xid--) {
+		if (!__test_and_clear_bit(xid, &process->permap))
+			continue;
+		personality = cobalt_personalities[xid];
+		priv = process->priv[xid];
+		if (priv == NULL)
+			continue;
+		/*
+		 * CAUTION: process potentially refers to stale memory
+		 * upon return from detach_process() for the Cobalt
+		 * personality, so don't dereference it afterwards.
+		 */
+		if (xid)
+			process->priv[xid] = NULL;
+		__clear_bit(personality->xid, &process->permap);
+		personality->ops.detach_process(priv);
+		atomic_dec(&personality->refcnt);
+		XENO_WARN_ON(COBALT, atomic_read(&personality->refcnt) < 0);
+		if (personality->module)
+			module_put(personality->module);
+	}
+
+	cobalt_set_process(NULL);
+
+	mutex_unlock(&personality_lock);
+}
+
+static void post_ppd_release(struct cobalt_umm *umm)
+{
+	struct cobalt_process *process;
+
+	process = container_of(umm, struct cobalt_process, sys_ppd.umm);
+	kfree(process);
+}
+
+static inline char *get_exe_path(struct task_struct *p)
+{
+	struct file *exe_file;
+	char *pathname, *buf;
+	struct mm_struct *mm;
+	struct path path;
+
+	/*
+	 * PATH_MAX is fairly large, and in any case won't fit on the
+	 * caller's stack happily; since we are mapping a shadow,
+	 * which is a heavyweight operation anyway, let's pick the
+	 * memory from the page allocator.
+	 */
+	buf = (char *)__get_free_page(GFP_KERNEL);
+	if (buf == NULL)
+		return ERR_PTR(-ENOMEM);
+
+	mm = get_task_mm(p);
+	if (mm == NULL) {
+		pathname = "vmlinux";
+		goto copy;	/* kernel thread */
+	}
+
+	exe_file = get_mm_exe_file(mm);
+	mmput(mm);
+	if (exe_file == NULL) {
+		pathname = ERR_PTR(-ENOENT);
+		goto out;	/* no luck. */
+	}
+
+	path = exe_file->f_path;
+	path_get(&exe_file->f_path);
+	fput(exe_file);
+	pathname = d_path(&path, buf, PATH_MAX);
+	path_put(&path);
+	if (IS_ERR(pathname))
+		goto out;	/* mmmh... */
+copy:
+	/* caution: d_path() may start writing anywhere in the buffer. */
+	pathname = kstrdup(pathname, GFP_KERNEL);
+out:
+	free_page((unsigned long)buf);
+
+	return pathname;
+}
+
+static inline int raise_cap(int cap)
+{
+	struct cred *new;
+
+	new = prepare_creds();
+	if (new == NULL)
+		return -ENOMEM;
+
+	cap_raise(new->cap_effective, cap);
+
+	return commit_creds(new);
+}
+
+static int bind_personality(struct xnthread_personality *personality)
+{
+	struct cobalt_process *process;
+	void *priv;
+
+	/*
+	 * We also check capabilities for stacking a Cobalt extension,
+	 * in case the process dropped the supervisor privileges after
+	 * a successful initial binding to the Cobalt interface.
+	 */
+	if (!capable(CAP_SYS_NICE) &&
+	    (gid_arg == -1 || !in_group_p(KGIDT_INIT(gid_arg))))
+		return -EPERM;
+	/*
+	 * Protect from the same process binding to the same interface
+	 * several times.
+	 */
+	priv = lookup_context(personality->xid);
+	if (priv)
+		return 0;
+
+	priv = personality->ops.attach_process();
+	if (IS_ERR(priv))
+		return PTR_ERR(priv);
+
+	process = cobalt_current_process();
+	/*
+	 * We are still covered by the personality_lock, so we may
+	 * safely bump the module refcount after the attach handler
+	 * has returned.
+	 */
+	if (personality->module && !try_module_get(personality->module)) {
+		personality->ops.detach_process(priv);
+		return -EAGAIN;
+	}
+
+	__set_bit(personality->xid, &process->permap);
+	atomic_inc(&personality->refcnt);
+	process->priv[personality->xid] = priv;
+
+	raise_cap(CAP_SYS_NICE);
+	raise_cap(CAP_IPC_LOCK);
+	raise_cap(CAP_SYS_RAWIO);
+
+	return 0;
+}
+
+int cobalt_bind_personality(unsigned int magic)
+{
+	struct xnthread_personality *personality;
+	int xid, ret = -ESRCH;
+
+	mutex_lock(&personality_lock);
+
+	for (xid = 1; xid < NR_PERSONALITIES; xid++) {
+		personality = cobalt_personalities[xid];
+		if (personality && personality->magic == magic) {
+			ret = bind_personality(personality);
+			break;
+		}
+	}
+
+	mutex_unlock(&personality_lock);
+
+	return ret ?: xid;
+}
+
+int cobalt_bind_core(int ufeatures)
+{
+	struct cobalt_process *process;
+	int ret;
+
+	mutex_lock(&personality_lock);
+	ret = bind_personality(&cobalt_personality);
+	mutex_unlock(&personality_lock);
+	if (ret)
+		return ret;
+
+	process = cobalt_current_process();
+	/* Feature set userland knows about. */
+	process->ufeatures = ufeatures;
+
+	return 0;
+}
+
+/**
+ * @fn int cobalt_register_personality(struct xnthread_personality *personality)
+ * @internal
+ * @brief Register a new interface personality.
+ *
+ * - personality->ops.attach_process() is called when a user-space
+ *   process binds to the personality, on behalf of one of its
+ *   threads. The attach_process() handler may return:
+ *
+ *   . an opaque pointer, representing the context of the calling
+ *   process for this personality;
+ *
+ *   . a NULL pointer, meaning that no per-process structure should be
+ *   attached to this process for this personality;
+ *
+ *   . ERR_PTR(negative value) indicating an error, the binding
+ *   process will then abort.
+ *
+ * - personality->ops.detach_process() is called on behalf of an
+ *   exiting user-space process which has previously attached to the
+ *   personality. This handler is passed a pointer to the per-process
+ *   data received earlier from the ops->attach_process() handler.
+ *
+ * @return the personality (extension) identifier.
+ *
+ * @note cobalt_get_context() is NULL when ops.detach_process() is
+ * invoked for the personality the caller detaches from.
+ *
+ * @coretags{secondary-only}
+ */
+int cobalt_register_personality(struct xnthread_personality *personality)
+{
+	int xid;
+
+	mutex_lock(&personality_lock);
+
+	for (xid = 0; xid < NR_PERSONALITIES; xid++) {
+		if (cobalt_personalities[xid] == NULL) {
+			personality->xid = xid;
+			atomic_set(&personality->refcnt, 0);
+			cobalt_personalities[xid] = personality;
+			goto out;
+		}
+	}
+
+	xid = -EAGAIN;
+out:
+	mutex_unlock(&personality_lock);
+
+	return xid;
+}
+EXPORT_SYMBOL_GPL(cobalt_register_personality);
+
+/*
+ * @brief Unregister an interface personality.
+ *
+ * @coretags{secondary-only}
+ */
+int cobalt_unregister_personality(int xid)
+{
+	struct xnthread_personality *personality;
+	int ret = 0;
+
+	if (xid < 0 || xid >= NR_PERSONALITIES)
+		return -EINVAL;
+
+	mutex_lock(&personality_lock);
+
+	personality = cobalt_personalities[xid];
+	if (atomic_read(&personality->refcnt) > 0)
+		ret = -EBUSY;
+	else
+		cobalt_personalities[xid] = NULL;
+
+	mutex_unlock(&personality_lock);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(cobalt_unregister_personality);
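+
+/*
+ * Illustrative sketch (hypothetical mydrv_* names, not part of this
+ * patch): an extension module would typically register its
+ * personality at load time and drop it on removal, e.g.:
+ *
+ *	static struct xnthread_personality mydrv_personality = {
+ *		.name = "mydrv",
+ *		.magic = MYDRV_BINDING_MAGIC,
+ *		.module = THIS_MODULE,
+ *		.ops = {
+ *			.attach_process = mydrv_attach_process,
+ *			.detach_process = mydrv_detach_process,
+ *		},
+ *	};
+ *
+ *	static int __init mydrv_init(void)
+ *	{
+ *		int xid = cobalt_register_personality(&mydrv_personality);
+ *		return xid < 0 ? xid : 0;
+ *	}
+ *
+ *	static void __exit mydrv_exit(void)
+ *	{
+ *		cobalt_unregister_personality(mydrv_personality.xid);
+ *	}
+ */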
+
+/**
+ * Stack a new personality over Cobalt for the current thread.
+ *
+ * This service registers the current thread as a member of the
+ * additional personality identified by @a xid. If the current thread
+ * is already assigned this personality, the call returns successfully
+ * with no effect.
+ *
+ * @param xid the identifier of the additional personality.
+ *
+ * @return A handle to the previous personality. The caller should
+ * save this handle for unstacking @a xid when applicable via a call
+ * to cobalt_pop_personality().
+ *
+ * @coretags{secondary-only}
+ */
+struct xnthread_personality *
+cobalt_push_personality(int xid)
+{
+	struct cobalt_threadinfo *p = pipeline_current();
+	struct xnthread_personality *prev, *next;
+	struct xnthread *thread = p->thread;
+
+	secondary_mode_only();
+
+	mutex_lock(&personality_lock);
+
+	if (xid < 0 || xid >= NR_PERSONALITIES ||
+	    p->process == NULL || !test_bit(xid, &p->process->permap)) {
+		mutex_unlock(&personality_lock);
+		return NULL;
+	}
+
+	next = cobalt_personalities[xid];
+	prev = thread->personality;
+	if (next == prev) {
+		mutex_unlock(&personality_lock);
+		return prev;
+	}
+
+	thread->personality = next;
+	mutex_unlock(&personality_lock);
+	xnthread_run_handler(thread, map_thread);
+
+	return prev;
+}
+EXPORT_SYMBOL_GPL(cobalt_push_personality);
+
+/**
+ * Pop the topmost personality from the current thread.
+ *
+ * This service pops the topmost personality off the current thread.
+ *
+ * @param prev the previous personality which was returned by the
+ * latest call to cobalt_push_personality() for the current thread.
+ *
+ * @coretags{secondary-only}
+ */
+void cobalt_pop_personality(struct xnthread_personality *prev)
+{
+	struct cobalt_threadinfo *p = pipeline_current();
+	struct xnthread *thread = p->thread;
+
+	secondary_mode_only();
+	thread->personality = prev;
+}
+EXPORT_SYMBOL_GPL(cobalt_pop_personality);
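+
+/*
+ * Illustrative usage sketch (names are hypothetical): a caller which
+ * obtained an extension identifier from cobalt_bind_personality()
+ * may stack and unstack that personality around a code section, e.g.:
+ *
+ *	struct xnthread_personality *prev;
+ *
+ *	prev = cobalt_push_personality(my_xid);
+ *	if (prev) {
+ *		run_with_extension_semantics();
+ *		cobalt_pop_personality(prev);
+ *	}
+ */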
+
+/**
+ * Return the per-process data attached to the calling user process.
+ *
+ * This service returns the per-process data attached to the calling
+ * user process for the personality whose xid is @a xid.
+ *
+ * The per-process data was obtained from the ->attach_process()
+ * handler defined for the personality @a xid refers to.
+ *
+ * See cobalt_register_personality() documentation for information on
+ * the way to attach a per-process data to a process.
+ *
+ * @param xid the personality identifier.
+ *
+ * @return the per-process data if the current context is a user-space
+ * process; @return NULL otherwise. As a special case,
+ * cobalt_get_context(0) returns the current Cobalt process
+ * descriptor, which is strictly identical to calling
+ * cobalt_current_process().
+ *
+ * @coretags{task-unrestricted}
+ */
+void *cobalt_get_context(int xid)
+{
+	return lookup_context(xid);
+}
+EXPORT_SYMBOL_GPL(cobalt_get_context);
+
+int cobalt_yield(xnticks_t min, xnticks_t max)
+{
+	xnticks_t start;
+	int ret;
+
+	start = xnclock_read_monotonic(&nkclock);
+	max += start;
+	min += start;
+
+	do {
+		ret = xnsynch_sleep_on(&yield_sync, max, XN_ABSOLUTE);
+		if (ret & XNBREAK)
+			return -EINTR;
+	} while (ret == 0 && xnclock_read_monotonic(&nkclock) < min);
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(cobalt_yield);
+
+/**
+ * @fn int cobalt_map_user(struct xnthread *thread, __u32 __user *u_winoff)
+ * @internal
+ * @brief Create a shadow thread context over a user task.
+ *
+ * This call maps a Xenomai thread to the current regular Linux task
+ * running in userland.  The priority and scheduling class of the
+ * underlying Linux task are not affected; it is assumed that the
+ * interface library did set them appropriately before issuing the
+ * shadow mapping request.
+ *
+ * @param thread The descriptor address of the new shadow thread to be
+ * mapped to current. This descriptor must have been previously
+ * initialized by a call to xnthread_init().
+ *
+ * @param u_winoff will receive the offset of the per-thread
+ * "u_window" structure in the global heap associated to @a
+ * thread. This structure reflects thread state information visible
+ * from userland through a shared memory window.
+ *
+ * @return 0 is returned on success. Otherwise:
+ *
+ * - -EINVAL is returned if the thread control block does not bear the
+ * XNUSER bit.
+ *
+ * - -EBUSY is returned if either the current Linux task or the
+ * associated shadow thread is already involved in a shadow mapping.
+ *
+ * @coretags{secondary-only}
+ */
+int cobalt_map_user(struct xnthread *thread, __u32 __user *u_winoff)
+{
+	struct xnthread_user_window *u_window;
+	struct xnthread_start_attr attr;
+	struct cobalt_ppd *sys_ppd;
+	struct cobalt_umm *umm;
+	int ret;
+
+	if (!xnthread_test_state(thread, XNUSER))
+		return -EINVAL;
+
+	if (xnthread_current() || xnthread_test_state(thread, XNMAPPED))
+		return -EBUSY;
+
+	if (!access_wok(u_winoff, sizeof(*u_winoff)))
+		return -EFAULT;
+
+	ret = pipeline_prepare_current();
+	if (ret)
+		return ret;
+
+	umm = &cobalt_kernel_ppd.umm;
+	u_window = cobalt_umm_zalloc(umm, sizeof(*u_window));
+	if (u_window == NULL)
+		return -ENOMEM;
+
+	thread->u_window = u_window;
+	__xn_put_user(cobalt_umm_offset(umm, u_window), u_winoff);
+	xnthread_pin_initial(thread);
+
+	/*
+	 * CAUTION: we enable the pipeline notifier only when our
+	 * shadow TCB is consistent, so that we won't trigger false
+	 * positives in debug code from handle_schedule_event() and
+	 * friends.
+	 */
+	pipeline_init_shadow_tcb(thread);
+	xnthread_suspend(thread, XNRELAX, XN_INFINITE, XN_RELATIVE, NULL);
+	pipeline_attach_current(thread);
+	xnthread_set_state(thread, XNMAPPED);
+	xndebug_shadow_init(thread);
+	sys_ppd = cobalt_ppd_get(0);
+	atomic_inc(&sys_ppd->refcnt);
+	/*
+	 * ->map_thread() handler is invoked after the TCB is fully
+	 * built, and when we know for sure that current will go
+	 * through our task-exit handler, because it has a shadow
+	 * extension and I-pipe notifications will soon be enabled for
+	 * it.
+	 */
+	xnthread_run_handler(thread, map_thread);
+	pipeline_enable_kevents();
+
+	attr.mode = 0;
+	attr.entry = NULL;
+	attr.cookie = NULL;
+	ret = xnthread_start(thread, &attr);
+	if (ret)
+		return ret;
+
+	xnthread_sync_window(thread);
+
+	xntrace_pid(xnthread_host_pid(thread),
+		    xnthread_current_priority(thread));
+
+	return 0;
+}
+
+void cobalt_signal_yield(void)
+{
+	spl_t s;
+
+	if (!xnsynch_pended_p(&yield_sync))
+		return;
+
+	xnlock_get_irqsave(&nklock, s);
+	if (xnsynch_pended_p(&yield_sync)) {
+		xnsynch_flush(&yield_sync, 0);
+		xnsched_run();
+	}
+	xnlock_put_irqrestore(&nklock, s);
+}
+
+static inline struct cobalt_process *
+process_from_thread(struct xnthread *thread)
+{
+	return container_of(thread, struct cobalt_thread, threadbase)->process;
+}
+
+void cobalt_stop_debugged_process(struct xnthread *thread)
+{
+	struct cobalt_process *process = process_from_thread(thread);
+	struct cobalt_thread *cth;
+
+	if (process->debugged_threads > 0)
+		return;
+
+	list_for_each_entry(cth, &process->thread_list, next) {
+		if (&cth->threadbase == thread)
+			continue;
+
+		xnthread_suspend(&cth->threadbase, XNDBGSTOP, XN_INFINITE,
+				 XN_RELATIVE, NULL);
+	}
+}
+
+static void cobalt_resume_debugged_process(struct cobalt_process *process)
+{
+	struct cobalt_thread *cth;
+
+	xnsched_lock();
+
+	list_for_each_entry(cth, &process->thread_list, next)
+		if (xnthread_test_state(&cth->threadbase, XNDBGSTOP))
+			xnthread_resume(&cth->threadbase, XNDBGSTOP);
+
+	xnsched_unlock();
+}
+
+/* called with nklock held */
+void cobalt_register_debugged_thread(struct xnthread *thread)
+{
+	struct cobalt_process *process = process_from_thread(thread);
+
+	xnthread_set_state(thread, XNSSTEP);
+
+	cobalt_stop_debugged_process(thread);
+	process->debugged_threads++;
+
+	if (xnthread_test_state(thread, XNRELAX))
+		xnthread_suspend(thread, XNDBGSTOP, XN_INFINITE, XN_RELATIVE,
+				 NULL);
+}
+
+/* called with nklock held */
+void cobalt_unregister_debugged_thread(struct xnthread *thread)
+{
+	struct cobalt_process *process = process_from_thread(thread);
+
+	process->debugged_threads--;
+	xnthread_clear_state(thread, XNSSTEP);
+
+	if (process->debugged_threads == 0)
+		cobalt_resume_debugged_process(process);
+}
+
+int cobalt_handle_setaffinity_event(struct task_struct *task)
+{
+#ifdef CONFIG_SMP
+	struct xnthread *thread;
+	spl_t s;
+
+	thread = xnthread_from_task(task);
+	if (thread == NULL)
+		return KEVENT_PROPAGATE;
+
+	/*
+	 * Detect a Cobalt thread sleeping in primary mode which is
+	 * required to migrate to another CPU by the host kernel.
+	 *
+	 * We may NOT fix up thread->sched immediately using the
+	 * passive migration call, because that latter always has to
+	 * take place on behalf of the target thread itself while
+	 * running in secondary mode. Therefore, that thread needs to
+	 * go through secondary mode first, then move back to primary
+	 * mode, so that affinity_ok() does the fixup work.
+	 *
+	 * We force this by sending a SIGSHADOW signal to the migrated
+	 * thread, asking it to switch back to primary mode from the
+	 * handler, at which point the interrupted syscall may be
+	 * restarted.
+	 */
+	xnlock_get_irqsave(&nklock, s);
+
+	if (xnthread_test_state(thread, XNTHREAD_BLOCK_BITS & ~XNRELAX))
+		__xnthread_signal(thread, SIGSHADOW, SIGSHADOW_ACTION_HARDEN);
+
+	xnlock_put_irqrestore(&nklock, s);
+#endif /* CONFIG_SMP */
+
+	return KEVENT_PROPAGATE;
+}
+
+#ifdef CONFIG_SMP
+void cobalt_adjust_affinity(struct task_struct *task) /* nklocked, IRQs off */
+{
+	struct xnthread *thread = xnthread_from_task(task);
+	struct xnsched *sched;
+	int cpu = task_cpu(task);
+
+	/*
+	 * To maintain consistency between both Cobalt and host
+	 * schedulers, reflecting a thread migration to another CPU
+	 * into the Cobalt scheduler state must happen from secondary
+	 * mode only, on behalf of the migrated thread itself once it
+	 * runs on the target CPU.
+	 *
+	 * This means that the Cobalt scheduler state regarding the
+	 * CPU information lags behind the host scheduler state until
+	 * the migrated thread switches back to primary mode
+	 * (i.e. task_cpu(p) != xnsched_cpu(xnthread_from_task(p)->sched)).
+	 * This is ok since Cobalt does not schedule such a thread until then.
+	 *
+	 * check_affinity() detects when a Cobalt thread switching
+	 * back to primary mode did move to another CPU earlier while
+	 * in secondary mode. If so, do the fixups to reflect the
+	 * change.
+	 */
+	if (!xnsched_threading_cpu(cpu)) {
+		/*
+		 * The thread is about to switch to primary mode on a
+		 * non-rt CPU, which is damn wrong and hopeless.
+		 * Whine and cancel that thread.
+		 */
+		printk(XENO_WARNING "thread %s[%d] switched to non-rt CPU%d, aborted.\n",
+		       thread->name, xnthread_host_pid(thread), cpu);
+		/*
+		 * Can't call xnthread_cancel() from a migration
+		 * point, that would break. Since we are on the wakeup
+		 * path to hardening, just raise XNCANCELD to catch it
+		 * in xnthread_harden().
+		 */
+		xnthread_set_info(thread, XNCANCELD);
+		return;
+	}
+
+	sched = xnsched_struct(cpu);
+	if (sched == thread->sched)
+		return;
+
+	/*
+	 * The current thread moved to a supported real-time CPU,
+	 * which is not part of its original affinity mask
+	 * though. Assume user wants to extend this mask.
+	 */
+	if (!cpumask_test_cpu(cpu, &thread->affinity))
+		cpumask_set_cpu(cpu, &thread->affinity);
+
+	xnthread_run_handler_stack(thread, move_thread, cpu);
+	xnthread_migrate_passive(thread, sched);
+}
+#endif /* CONFIG_SMP */
+
+static void __handle_taskexit_event(struct task_struct *p)
+{
+	struct cobalt_ppd *sys_ppd;
+	struct xnthread *thread;
+	spl_t s;
+
+	/*
+	 * We are called for both kernel and user shadows over the
+	 * root thread.
+	 */
+	secondary_mode_only();
+
+	thread = xnthread_current();
+	XENO_BUG_ON(COBALT, thread == NULL);
+	trace_cobalt_shadow_unmap(thread);
+
+	xnlock_get_irqsave(&nklock, s);
+
+	if (xnthread_test_state(thread, XNSSTEP))
+		cobalt_unregister_debugged_thread(thread);
+
+	xnsched_run();
+
+	xnlock_put_irqrestore(&nklock, s);
+
+	xnthread_run_handler_stack(thread, exit_thread);
+
+	if (xnthread_test_state(thread, XNUSER)) {
+		cobalt_umm_free(&cobalt_kernel_ppd.umm, thread->u_window);
+		thread->u_window = NULL;
+		sys_ppd = cobalt_ppd_get(0);
+		if (atomic_dec_and_test(&sys_ppd->refcnt))
+			cobalt_remove_process(cobalt_current_process());
+	}
+}
+
+int cobalt_handle_user_return(struct task_struct *task)
+{
+	struct xnthread *thread;
+	spl_t s;
+	int err;
+
+	thread = xnthread_from_task(task);
+	if (thread == NULL)
+		return KEVENT_PROPAGATE;
+
+	if (xnthread_test_info(thread, XNCONTHI)) {
+		xnlock_get_irqsave(&nklock, s);
+		xnthread_clear_info(thread, XNCONTHI);
+		xnlock_put_irqrestore(&nklock, s);
+
+		err = xnthread_harden();
+
+		/*
+		 * XNCONTHI may or may not have been re-applied if
+		 * harden bailed out due to pending signals. Make sure
+		 * it is set in that case.
+		 */
+		if (err == -ERESTARTSYS) {
+			xnlock_get_irqsave(&nklock, s);
+			xnthread_set_info(thread, XNCONTHI);
+			xnlock_put_irqrestore(&nklock, s);
+		}
+	}
+
+	return KEVENT_PROPAGATE;
+}
+
+static void detach_current(void)
+{
+	struct cobalt_threadinfo *p = pipeline_current();
+
+	p->thread = NULL;
+	p->process = NULL;
+}
+
+int cobalt_handle_taskexit_event(struct task_struct *task) /* task == current */
+{
+	__handle_taskexit_event(task);
+
+	/*
+	 * __xnthread_cleanup() -> ... -> finalize_thread
+	 * handler. From that point, the TCB is dropped. Be careful not
+	 * to tread on stale memory within @thread.
+	 */
+	__xnthread_cleanup(xnthread_current());
+
+	detach_current();
+
+	return KEVENT_PROPAGATE;
+}
+
+int cobalt_handle_cleanup_event(struct mm_struct *mm)
+{
+	struct cobalt_process *old, *process;
+	struct cobalt_ppd *sys_ppd;
+	struct xnthread *curr;
+
+	/*
+	 * We are NOT called for exiting kernel shadows.
+	 * cobalt_current_process() is cleared if we get there after
+	 * handle_task_exit(), so we need to restore this context
+	 * pointer temporarily.
+	 */
+	process = cobalt_search_process(mm);
+	old = cobalt_set_process(process);
+	sys_ppd = cobalt_ppd_get(0);
+	if (sys_ppd != &cobalt_kernel_ppd) {
+		bool running_exec;
+
+		/*
+		 * Detect a userland shadow running exec(), i.e. still
+		 * attached to the current linux task (no prior
+		 * detach_current). In this case, we emulate a task
+		 * exit, since the Xenomai binding shall not survive
+		 * the exec() syscall. Since the process will keep on
+		 * running though, we have to disable the event
+		 * notifier manually for it.
+		 */
+		curr = xnthread_current();
+		running_exec = curr && (current->flags & PF_EXITING) == 0;
+		if (running_exec) {
+			__handle_taskexit_event(current);
+			pipeline_cleanup_process();
+		}
+		if (atomic_dec_and_test(&sys_ppd->refcnt))
+			cobalt_remove_process(process);
+		if (running_exec) {
+			__xnthread_cleanup(curr);
+			detach_current();
+		}
+	}
+
+	/*
+	 * CAUTION: Do not override a state change caused by
+	 * cobalt_remove_process().
+	 */
+	if (cobalt_current_process() == process)
+		cobalt_set_process(old);
+
+	return KEVENT_PROPAGATE;
+}
+
+static int attach_process(struct cobalt_process *process)
+{
+	struct cobalt_ppd *p = &process->sys_ppd;
+	char *exe_path;
+	int ret;
+
+	ret = cobalt_umm_init(&p->umm, CONFIG_XENO_OPT_PRIVATE_HEAPSZ * 1024,
+			      post_ppd_release);
+	if (ret)
+		return ret;
+
+	cobalt_umm_set_name(&p->umm, "private heap[%d]", task_pid_nr(current));
+
+	ret = pipeline_attach_process(process);
+	if (ret)
+		goto fail_pipeline;
+
+	exe_path = get_exe_path(current);
+	if (IS_ERR(exe_path)) {
+		printk(XENO_WARNING
+		       "%s[%d] can't find exe path\n",
+		       current->comm, task_pid_nr(current));
+		exe_path = NULL; /* Not lethal, but weird. */
+	}
+	p->exe_path = exe_path;
+	xntree_init(&p->fds);
+	atomic_set(&p->refcnt, 1);
+
+	ret = process_hash_enter(process);
+	if (ret)
+		goto fail_hash;
+
+	return 0;
+fail_hash:
+	pipeline_detach_process(process);
+	if (p->exe_path)
+		kfree(p->exe_path);
+fail_pipeline:
+	cobalt_umm_destroy(&p->umm);
+
+	return ret;
+}
+
+static void *cobalt_process_attach(void)
+{
+	struct cobalt_process *process;
+	int ret;
+
+	process = kzalloc(sizeof(*process), GFP_KERNEL);
+	if (process == NULL)
+		return ERR_PTR(-ENOMEM);
+
+	ret = attach_process(process);
+	if (ret) {
+		kfree(process);
+		return ERR_PTR(ret);
+	}
+
+	INIT_LIST_HEAD(&process->resources.condq);
+	INIT_LIST_HEAD(&process->resources.mutexq);
+	INIT_LIST_HEAD(&process->resources.semq);
+	INIT_LIST_HEAD(&process->resources.monitorq);
+	INIT_LIST_HEAD(&process->resources.eventq);
+	INIT_LIST_HEAD(&process->resources.schedq);
+	INIT_LIST_HEAD(&process->sigwaiters);
+	INIT_LIST_HEAD(&process->thread_list);
+	xntree_init(&process->usems);
+	bitmap_fill(process->timers_map, CONFIG_XENO_OPT_NRTIMERS);
+	cobalt_set_process(process);
+
+	return process;
+}
+
+static void detach_process(struct cobalt_process *process)
+{
+	struct cobalt_ppd *p = &process->sys_ppd;
+
+	if (p->exe_path)
+		kfree(p->exe_path);
+
+	rtdm_fd_cleanup(p);
+	process_hash_remove(process);
+	/*
+	 * CAUTION: the process descriptor might be immediately
+	 * released as a result of calling cobalt_umm_destroy(), so we
+	 * must do this last, so as not to tread on stale memory.
+	 */
+	cobalt_umm_destroy(&p->umm);
+}
+
+static void __reclaim_resource(struct cobalt_process *process,
+			       void (*reclaim)(struct cobalt_resnode *node, spl_t s),
+			       struct list_head *local,
+			       struct list_head *global)
+{
+	struct cobalt_resnode *node, *tmp;
+	LIST_HEAD(stash);
+	spl_t s;
+
+	xnlock_get_irqsave(&nklock, s);
+
+	if (list_empty(global))
+		goto flush_local;
+
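+	/*
+	 * Move the nodes owned by @process to a private stash first:
+	 * the reclaim handler releases then re-acquires the nklock,
+	 * so walking the global queue across those calls would be
+	 * unsafe.
+	 */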
+	list_for_each_entry_safe(node, tmp, global, next) {
+		if (node->owner == process) {
+			list_del(&node->next);
+			list_add(&node->next, &stash);
+		}
+	}
+
+	list_for_each_entry_safe(node, tmp, &stash, next) {
+		reclaim(node, s);
+		xnlock_get_irqsave(&nklock, s);
+	}
+
+	XENO_BUG_ON(COBALT, !list_empty(&stash));
+
+flush_local:
+	if (list_empty(local))
+		goto out;
+
+	list_for_each_entry_safe(node, tmp, local, next) {
+		reclaim(node, s);
+		xnlock_get_irqsave(&nklock, s);
+	}
+out:
+	xnsched_run();
+	xnlock_put_irqrestore(&nklock, s);
+}
+
+#define cobalt_reclaim_resource(__process, __reclaim, __type)		\
+	__reclaim_resource(__process, __reclaim,			\
+			   &(__process)->resources.__type ## q,		\
+			   &cobalt_global_resources.__type ## q)
+
+static void cobalt_process_detach(void *arg)
+{
+	struct cobalt_process *process = arg;
+
+	cobalt_nsem_reclaim(process);
+	cobalt_timer_reclaim(process);
+	cobalt_sched_reclaim(process);
+	cobalt_reclaim_resource(process, cobalt_cond_reclaim, cond);
+	cobalt_reclaim_resource(process, cobalt_mutex_reclaim, mutex);
+	cobalt_reclaim_resource(process, cobalt_event_reclaim, event);
+	cobalt_reclaim_resource(process, cobalt_monitor_reclaim, monitor);
+	cobalt_reclaim_resource(process, cobalt_sem_reclaim, sem);
+	detach_process(process);
+	/*
+	 * The cobalt_process descriptor release may be deferred until
+	 * the last mapping on the private heap is gone. Either way, it
+	 * is potentially stale memory already, so it must not be
+	 * dereferenced past this point.
+	 */
+}
+
+struct xnthread_personality cobalt_personality = {
+	.name = "cobalt",
+	.magic = 0,
+	.ops = {
+		.attach_process = cobalt_process_attach,
+		.detach_process = cobalt_process_detach,
+		.map_thread = cobalt_thread_map,
+		.exit_thread = cobalt_thread_exit,
+		.finalize_thread = cobalt_thread_finalize,
+	},
+};
+EXPORT_SYMBOL_GPL(cobalt_personality);
+
+__init int cobalt_init(void)
+{
+	unsigned int i, size;
+	int ret;
+
+	size = sizeof(*process_hash) * PROCESS_HASH_SIZE;
+	process_hash = kmalloc(size, GFP_KERNEL);
+	if (process_hash == NULL) {
+		printk(XENO_ERR "cannot allocate processes hash table\n");
+		return -ENOMEM;
+	}
+
+	ret = xndebug_init();
+	if (ret)
+		goto fail_debug;
+
+	for (i = 0; i < PROCESS_HASH_SIZE; i++)
+		INIT_HLIST_HEAD(&process_hash[i]);
+
+	xnsynch_init(&yield_sync, XNSYNCH_FIFO, NULL);
+
+	ret = cobalt_memdev_init();
+	if (ret)
+		goto fail_memdev;
+
+	ret = cobalt_register_personality(&cobalt_personality);
+	if (ret)
+		goto fail_register;
+
+	ret = cobalt_signal_init();
+	if (ret)
+		goto fail_siginit;
+
+	ret = pipeline_trap_kevents();
+	if (ret)
+		goto fail_kevents;
+
+	if (gid_arg != -1)
+		printk(XENO_INFO "allowing access to group %d\n", gid_arg);
+
+	return 0;
+fail_kevents:
+	cobalt_signal_cleanup();
+fail_siginit:
+	cobalt_unregister_personality(0);
+fail_register:
+	cobalt_memdev_cleanup();
+fail_memdev:
+	xnsynch_destroy(&yield_sync);
+	xndebug_cleanup();
+fail_debug:
+	kfree(process_hash);
+
+	return ret;
+}
+++ linux-patched/kernel/xenomai/posix/nsem.c	2022-03-21 12:58:29.019892569 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/kernel/xenomai/posix/thread.c	1970-01-01 01:00:00.000000000 +0100
+/*
+ * Copyright (C) 2013 Gilles Chanteperdrix <gilles.chanteperdrix@xenomai.org>.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+#include <linux/list.h>
+#include <linux/err.h>
+#include <cobalt/kernel/lock.h>
+#include <cobalt/kernel/heap.h>
+#include <cobalt/kernel/tree.h>
+#include "internal.h"
+#include "sem.h"
+#include "thread.h"
+#include <trace/events/cobalt-posix.h>
+
+DEFINE_PRIVATE_XNLOCK(named_sem_lock);
+
+struct cobalt_named_sem {
+	struct cobalt_sem *sem;
+	struct cobalt_sem_shadow __user *usem;
+	unsigned int refs;
+	struct xnid id;
+};
+
+static struct cobalt_named_sem *
+sem_search(struct cobalt_process *process, xnhandle_t handle)
+{
+	struct xnid *i;
+
+	i = xnid_fetch(&process->usems, handle);
+	if (i == NULL)
+		return NULL;
+
+	return container_of(i, struct cobalt_named_sem, id);
+}
+
+static struct cobalt_sem_shadow __user *
+sem_open(struct cobalt_process *process,
+	 struct cobalt_sem_shadow __user *ushadow,
+	 struct filename *filename, int oflags, mode_t mode,
+	 unsigned int value)
+{
+	const char *name = filename->name;
+	struct cobalt_sem_shadow shadow;
+	struct cobalt_named_sem *u, *v;
+	struct cobalt_sem *sem;
+	xnhandle_t handle;
+	spl_t s;
+	int rc;
+
+	if (name[0] != '/' || name[1] == '\0')
+		return ERR_PTR(-EINVAL);
+
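+	/*
+	 * Bind to the named semaphore; we may have to retry if it
+	 * disappears or shows up concurrently while we are setting
+	 * things up.
+	 */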
+  retry_bind:
+	rc = xnregistry_bind(&name[1], XN_NONBLOCK, XN_RELATIVE, &handle);
+	switch (rc) {
+	case 0:
+		/* Found */
+		if ((oflags & (O_CREAT | O_EXCL)) == (O_CREAT | O_EXCL))
+			return ERR_PTR(-EEXIST);
+
+		xnlock_get_irqsave(&named_sem_lock, s);
+		u = sem_search(process, handle);
+		if (u) {
+			++u->refs;
+			xnlock_put_irqrestore(&named_sem_lock, s);
+			return u->usem;
+		}
+		xnlock_put_irqrestore(&named_sem_lock, s);
+
+		xnlock_get_irqsave(&nklock, s);
+		sem = xnregistry_lookup(handle, NULL);
+		if (sem && sem->magic != COBALT_SEM_MAGIC) {
+			xnlock_put_irqrestore(&nklock, s);
+			return ERR_PTR(-EINVAL);
+		}
+
+		if (sem) {
+			++sem->refs;
+			xnlock_put_irqrestore(&nklock, s);
+		} else {
+			xnlock_put_irqrestore(&nklock, s);
+			goto retry_bind;
+		}
+
+		__cobalt_sem_shadow_init(sem, COBALT_NAMED_SEM_MAGIC, &shadow);
+		break;
+
+	case -EWOULDBLOCK:
+		/* Not found */
+		if ((oflags & O_CREAT) == 0)
+			return ERR_PTR(-ENOENT);
+
+		shadow.magic = 0;
+		sem = __cobalt_sem_init(&name[1], &shadow,
+					SEM_PSHARED | SEM_NAMED, value);
+		if (IS_ERR(sem)) {
+			rc = PTR_ERR(sem);
+			if (rc == -EEXIST)
+				goto retry_bind;
+			return ERR_PTR(rc);
+		}
+
+		sem->pathname = filename;
+		handle = shadow.handle;
+		break;
+
+	default:
+		return ERR_PTR(rc);
+	}
+
+	if (cobalt_copy_to_user(ushadow, &shadow, sizeof(shadow))) {
+		__cobalt_sem_destroy(handle);
+		return ERR_PTR(-EFAULT);
+	}
+
+	u = xnmalloc(sizeof(*u));
+	if (u == NULL) {
+		__cobalt_sem_destroy(handle);
+		return ERR_PTR(-ENOMEM);
+	}
+
+	u->sem = sem;
+	u->usem = ushadow;
+	u->refs = 1;
+
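+	/*
+	 * Check for a race: another thread of this process may have
+	 * registered the same semaphore while the lock was dropped;
+	 * if so, reuse its descriptor and drop ours.
+	 */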
+	xnlock_get_irqsave(&named_sem_lock, s);
+	v = sem_search(process, handle);
+	if (v) {
+		++v->refs;
+		xnlock_put_irqrestore(&named_sem_lock, s);
+		xnlock_get_irqsave(&nklock, s);
+		--sem->refs;
+		xnlock_put_irqrestore(&nklock, s);
+		putname(filename);
+		xnfree(u);
+		u = v;
+	} else {
+		xnid_enter(&process->usems, &u->id, handle);
+		xnlock_put_irqrestore(&named_sem_lock, s);
+	}
+
+	trace_cobalt_psem_open(name, handle, oflags, mode, value);
+
+	return u->usem;
+}
+
+static int sem_close(struct cobalt_process *process, xnhandle_t handle)
+{
+	struct cobalt_named_sem *u;
+	spl_t s;
+	int err;
+
+	xnlock_get_irqsave(&named_sem_lock, s);
+	u = sem_search(process, handle);
+	if (u == NULL) {
+		err = -ENOENT;
+		goto err_unlock;
+	}
+
+	if (--u->refs) {
+		err = 0;
+		goto err_unlock;
+	}
+
+	xnid_remove(&process->usems, &u->id);
+	xnlock_put_irqrestore(&named_sem_lock, s);
+
+	__cobalt_sem_destroy(handle);
+
+	xnfree(u);
+	return 1;
+
+  err_unlock:
+	xnlock_put_irqrestore(&named_sem_lock, s);
+	return err;
+}
+
+struct cobalt_sem_shadow __user *
+__cobalt_sem_open(struct cobalt_sem_shadow __user *usm,
+		  const char __user *u_name,
+		  int oflags, mode_t mode, unsigned int value)
+{
+	struct cobalt_process *process;
+	struct filename *filename;
+
+	process = cobalt_current_process();
+	if (process == NULL)
+		return ERR_PTR(-EPERM);
+
+	filename = getname(u_name);
+	if (IS_ERR(filename))
+		return ERR_CAST(filename);
+
+	usm = sem_open(process, usm, filename, oflags, mode, value);
+	if (IS_ERR(usm)) {
+		trace_cobalt_psem_open_failed(filename->name, oflags, mode,
+					      value, PTR_ERR(usm));
+		putname(filename);
+	}
+
+	return usm;
+}
+
+COBALT_SYSCALL(sem_open, lostage,
+	       (struct cobalt_sem_shadow __user *__user *u_addrp,
+		const char __user *u_name,
+		int oflags, mode_t mode, unsigned int value))
+{
+	struct cobalt_sem_shadow __user *usm;
+
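+	/*
+	 * u_addrp points at a user-space shadow pointer: fetch the
+	 * address passed in, then write back the descriptor we
+	 * actually bound to, which may differ if the semaphore was
+	 * already open.
+	 */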
+	if (__xn_get_user(usm, u_addrp))
+		return -EFAULT;
+
+	usm = __cobalt_sem_open(usm, u_name, oflags, mode, value);
+	if (IS_ERR(usm))
+		return PTR_ERR(usm);
+
+	return __xn_put_user(usm, u_addrp) ? -EFAULT : 0;
+}
+
+COBALT_SYSCALL(sem_close, lostage,
+	       (struct cobalt_sem_shadow __user *usm))
+{
+	struct cobalt_process *process;
+	xnhandle_t handle;
+
+	process = cobalt_current_process();
+	if (process == NULL)
+		return -EPERM;
+
+	handle = cobalt_get_handle_from_user(&usm->handle);
+	trace_cobalt_psem_close(handle);
+
+	return sem_close(process, handle);
+}
+
+static inline int sem_unlink(const char *name)
+{
+	xnhandle_t handle;
+	int ret;
+
+	if (name[0] != '/')
+		return -EINVAL;
+
+	ret = xnregistry_bind(name + 1, XN_NONBLOCK, XN_RELATIVE, &handle);
+	if (ret == -EWOULDBLOCK)
+		return -ENOENT;
+
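+	/*
+	 * If the semaphore is still in use, __cobalt_sem_destroy()
+	 * returns -EBUSY; in that case only drop the registry
+	 * binding, so the name cannot be looked up anymore while
+	 * current users keep their references.
+	 */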
+	if (__cobalt_sem_destroy(handle) == -EBUSY)
+		xnregistry_unlink(xnregistry_key(handle));
+
+	return 0;
+}
+
+COBALT_SYSCALL(sem_unlink, lostage,
+	       (const char __user *u_name))
+{
+	struct filename *filename;
+	int ret;
+
+	filename = getname(u_name);
+	if (IS_ERR(filename))
+		return PTR_ERR(filename);
+
+	trace_cobalt_psem_unlink(filename->name);
+	ret = sem_unlink(filename->name);
+	putname(filename);
+
+	return ret;
+}
+
+static void reclaim_named_sem(void *arg, struct xnid *i)
+{
+	struct cobalt_process *process = arg;
+	struct cobalt_named_sem *u;
+
+	u = container_of(i, struct cobalt_named_sem, id);
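+	/*
+	 * Force the reference count to one, so the following close
+	 * drops the last reference and releases the semaphore.
+	 */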
+	u->refs = 1;
+	sem_close(process, xnid_key(i));
+}
+
+void cobalt_nsem_reclaim(struct cobalt_process *process)
+{
+	xntree_cleanup(&process->usems, process, reclaim_named_sem);
+}
+++ linux-patched/kernel/xenomai/posix/thread.c	2022-03-21 12:58:29.015892608 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/kernel/xenomai/posix/Makefile	1970-01-01 01:00:00.000000000 +0100
+/*
+ * Written by Gilles Chanteperdrix <gilles.chanteperdrix@xenomai.org>.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+#include <linux/types.h>
+#include <linux/cred.h>
+#include <linux/jhash.h>
+#include <linux/signal.h>
+#include <linux/jiffies.h>
+#include <linux/err.h>
+#include "internal.h"
+#include "thread.h"
+#include "sched.h"
+#include "signal.h"
+#include "timer.h"
+#include "clock.h"
+#include "sem.h"
+#define CREATE_TRACE_POINTS
+#include <trace/events/cobalt-posix.h>
+
+xnticks_t cobalt_time_slice = CONFIG_XENO_OPT_RR_QUANTUM * 1000;
+
+#define PTHREAD_HSLOTS (1 << 8)	/* Must be a power of 2 */
+
+/* Process-local index, pthread_t x mm_struct (cobalt_local_hkey). */
+struct local_thread_hash {
+	pid_t pid;
+	struct cobalt_thread *thread;
+	struct cobalt_local_hkey hkey;
+	struct local_thread_hash *next;
+};
+
+/* System-wide index on task_pid_nr(). */
+struct global_thread_hash {
+	pid_t pid;
+	struct cobalt_thread *thread;
+	struct global_thread_hash *next;
+};
+
+static struct local_thread_hash *local_index[PTHREAD_HSLOTS];
+
+static struct global_thread_hash *global_index[PTHREAD_HSLOTS];
+
+static inline struct local_thread_hash *
+thread_hash(const struct cobalt_local_hkey *hkey,
+	    struct cobalt_thread *thread, pid_t pid)
+{
+	struct global_thread_hash **ghead, *gslot;
+	struct local_thread_hash **lhead, *lslot;
+	u32 hash;
+	void *p;
+	spl_t s;
+
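+	/*
+	 * Allocate the local and global hash slots as a single memory
+	 * block, so that freeing the local slot in thread_unhash()
+	 * releases both at once.
+	 */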
+	p = xnmalloc(sizeof(*lslot) + sizeof(*gslot));
+	if (p == NULL)
+		return NULL;
+
+	lslot = p;
+	lslot->hkey = *hkey;
+	lslot->thread = thread;
+	lslot->pid = pid;
+	hash = jhash2((u32 *)&lslot->hkey,
+		      sizeof(lslot->hkey) / sizeof(u32), 0);
+	lhead = &local_index[hash & (PTHREAD_HSLOTS - 1)];
+
+	gslot = p + sizeof(*lslot);
+	gslot->pid = pid;
+	gslot->thread = thread;
+	hash = jhash2((u32 *)&pid, sizeof(pid) / sizeof(u32), 0);
+	ghead = &global_index[hash & (PTHREAD_HSLOTS - 1)];
+
+	xnlock_get_irqsave(&nklock, s);
+	lslot->next = *lhead;
+	*lhead = lslot;
+	gslot->next = *ghead;
+	*ghead = gslot;
+	xnlock_put_irqrestore(&nklock, s);
+
+	return lslot;
+}
+
+static inline void thread_unhash(const struct cobalt_local_hkey *hkey)
+{
+	struct global_thread_hash **gtail, *gslot;
+	struct local_thread_hash **ltail, *lslot;
+	pid_t pid;
+	u32 hash;
+	spl_t s;
+
+	hash = jhash2((u32 *) hkey, sizeof(*hkey) / sizeof(u32), 0);
+	ltail = &local_index[hash & (PTHREAD_HSLOTS - 1)];
+
+	xnlock_get_irqsave(&nklock, s);
+
+	lslot = *ltail;
+	while (lslot &&
+	       (lslot->hkey.u_pth != hkey->u_pth ||
+		lslot->hkey.mm != hkey->mm)) {
+		ltail = &lslot->next;
+		lslot = *ltail;
+	}
+
+	if (lslot == NULL) {
+		xnlock_put_irqrestore(&nklock, s);
+		return;
+	}
+
+	*ltail = lslot->next;
+	pid = lslot->pid;
+	hash = jhash2((u32 *)&pid, sizeof(pid) / sizeof(u32), 0);
+	gtail = &global_index[hash & (PTHREAD_HSLOTS - 1)];
+	gslot = *gtail;
+	while (gslot && gslot->pid != pid) {
+		gtail = &gslot->next;
+		gslot = *gtail;
+	}
+	/* gslot must be found here. */
+	XENO_BUG_ON(COBALT, !(gslot && gtail));
+	*gtail = gslot->next;
+
+	xnlock_put_irqrestore(&nklock, s);
+
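+	/*
+	 * Releasing the local slot also frees the global one, which
+	 * was carved from the same memory block (see thread_hash()).
+	 */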
+	xnfree(lslot);
+}
+
+static struct cobalt_thread *
+thread_lookup(const struct cobalt_local_hkey *hkey)
+{
+	struct local_thread_hash *lslot;
+	struct cobalt_thread *thread;
+	u32 hash;
+	spl_t s;
+
+	hash = jhash2((u32 *)hkey, sizeof(*hkey) / sizeof(u32), 0);
+	lslot = local_index[hash & (PTHREAD_HSLOTS - 1)];
+
+	xnlock_get_irqsave(&nklock, s);
+
+	while (lslot != NULL &&
+	       (lslot->hkey.u_pth != hkey->u_pth || lslot->hkey.mm != hkey->mm))
+		lslot = lslot->next;
+
+	thread = lslot ? lslot->thread : NULL;
+
+	xnlock_put_irqrestore(&nklock, s);
+
+	return thread;
+}
+
+struct cobalt_thread *cobalt_thread_find(pid_t pid) /* nklocked, IRQs off */
+{
+	struct global_thread_hash *gslot;
+	u32 hash;
+
+	hash = jhash2((u32 *)&pid, sizeof(pid) / sizeof(u32), 0);
+
+	gslot = global_index[hash & (PTHREAD_HSLOTS - 1)];
+	while (gslot && gslot->pid != pid)
+		gslot = gslot->next;
+
+	return gslot ? gslot->thread : NULL;
+}
+EXPORT_SYMBOL_GPL(cobalt_thread_find);
+
+struct cobalt_thread *cobalt_thread_find_local(pid_t pid) /* nklocked, IRQs off */
+{
+	struct cobalt_thread *thread;
+
+	thread = cobalt_thread_find(pid);
+	if (thread == NULL || thread->hkey.mm != current->mm)
+		return NULL;
+
+	return thread;
+}
+EXPORT_SYMBOL_GPL(cobalt_thread_find_local);
+
+struct cobalt_thread *cobalt_thread_lookup(unsigned long pth) /* nklocked, IRQs off */
+{
+	struct cobalt_local_hkey hkey;
+
+	hkey.u_pth = pth;
+	hkey.mm = current->mm;
+	return thread_lookup(&hkey);
+}
+EXPORT_SYMBOL_GPL(cobalt_thread_lookup);
+
+void cobalt_thread_map(struct xnthread *curr)
+{
+	struct cobalt_thread *thread;
+
+	thread = container_of(curr, struct cobalt_thread, threadbase);
+	thread->process = cobalt_current_process();
+	XENO_BUG_ON(COBALT, thread->process == NULL);
+}
+
+struct xnthread_personality *cobalt_thread_exit(struct xnthread *curr)
+{
+	struct cobalt_thread *thread;
+	spl_t s;
+
+	thread = container_of(curr, struct cobalt_thread, threadbase);
+	/*
+	 * Unhash first, to prevent further access to the TCB from
+	 * userland.
+	 */
+	thread_unhash(&thread->hkey);
+	xnlock_get_irqsave(&nklock, s);
+	cobalt_mark_deleted(thread);
+	list_del(&thread->next);
+	xnlock_put_irqrestore(&nklock, s);
+	cobalt_signal_flush(thread);
+	xnsynch_destroy(&thread->monitor_synch);
+	xnsynch_destroy(&thread->sigwait);
+
+	return NULL;
+}
+
+struct xnthread_personality *cobalt_thread_finalize(struct xnthread *zombie)
+{
+	struct cobalt_thread *thread;
+
+	thread = container_of(zombie, struct cobalt_thread, threadbase);
+	xnfree(thread);
+
+	return NULL;
+}
+
+int __cobalt_thread_setschedparam_ex(struct cobalt_thread *thread, int policy,
+				     const struct sched_param_ex *param_ex)
+{
+	struct xnsched_class *sched_class;
+	union xnsched_policy_param param;
+	xnticks_t tslice;
+	int ret = 0;
+	spl_t s;
+
+	xnlock_get_irqsave(&nklock, s);
+
+	if (!cobalt_obj_active(thread, COBALT_THREAD_MAGIC,
+			       struct cobalt_thread)) {
+		ret = -ESRCH;
+		goto out;
+	}
+
+	tslice = thread->threadbase.rrperiod;
+	sched_class = cobalt_sched_policy_param(&param, policy,
+						param_ex, &tslice);
+	if (sched_class == NULL) {
+		ret = -EINVAL;
+		goto out;
+	}
+	xnthread_set_slice(&thread->threadbase, tslice);
+	if (cobalt_call_extension(thread_setsched, &thread->extref, ret,
+				  sched_class, &param) && ret)
+		goto out;
+	ret = xnthread_set_schedparam(&thread->threadbase,
+				      sched_class, &param);
+	xnsched_run();
+out:
+	xnlock_put_irqrestore(&nklock, s);
+
+	return ret;
+}
+
+int __cobalt_thread_getschedparam_ex(struct cobalt_thread *thread,
+				     int *policy_r,
+				     struct sched_param_ex *param_ex)
+{
+	struct xnsched_class *base_class;
+	struct xnthread *base_thread;
+	spl_t s;
+
+	xnlock_get_irqsave(&nklock, s);
+
+	if (!cobalt_obj_active(thread, COBALT_THREAD_MAGIC,
+			       struct cobalt_thread)) {
+		xnlock_put_irqrestore(&nklock, s);
+		return -ESRCH;
+	}
+
+	base_thread = &thread->threadbase;
+	base_class = base_thread->base_class;
+	*policy_r = base_class->policy;
+
+	param_ex->sched_priority = xnthread_base_priority(base_thread);
+	if (param_ex->sched_priority == 0) /* SCHED_FIFO/SCHED_WEAK */
+		*policy_r = SCHED_NORMAL;
+
+	if (base_class == &xnsched_class_rt) {
+		if (xnthread_test_state(base_thread, XNRRB)) {
+			u_ns2ts(&param_ex->sched_rr_quantum, base_thread->rrperiod);
+			*policy_r = SCHED_RR;
+		}
+		goto out;
+	}
+
+#ifdef CONFIG_XENO_OPT_SCHED_WEAK
+	if (base_class == &xnsched_class_weak) {
+		if (*policy_r != SCHED_WEAK)
+			param_ex->sched_priority = -param_ex->sched_priority;
+		goto out;
+	}
+#endif
+#ifdef CONFIG_XENO_OPT_SCHED_SPORADIC
+	if (base_class == &xnsched_class_sporadic) {
+		param_ex->sched_ss_low_priority = base_thread->pss->param.low_prio;
+		u_ns2ts(&param_ex->sched_ss_repl_period, base_thread->pss->param.repl_period);
+		u_ns2ts(&param_ex->sched_ss_init_budget, base_thread->pss->param.init_budget);
+		param_ex->sched_ss_max_repl = base_thread->pss->param.max_repl;
+		goto out;
+	}
+#endif
+#ifdef CONFIG_XENO_OPT_SCHED_TP
+	if (base_class == &xnsched_class_tp) {
+		param_ex->sched_tp_partition =
+			base_thread->tps - base_thread->sched->tp.partitions;
+		goto out;
+	}
+#endif
+#ifdef CONFIG_XENO_OPT_SCHED_QUOTA
+	if (base_class == &xnsched_class_quota) {
+		param_ex->sched_quota_group = base_thread->quota->tgid;
+		goto out;
+	}
+#endif
+
+out:
+	xnlock_put_irqrestore(&nklock, s);
+
+	return 0;
+}
+
+static int pthread_create(struct cobalt_thread **thread_p,
+			  int policy,
+			  const struct sched_param_ex *param_ex,
+			  struct task_struct *task)
+{
+	struct cobalt_process *process = cobalt_current_process();
+	struct xnsched_class *sched_class;
+	union xnsched_policy_param param;
+	struct xnthread_init_attr iattr;
+	struct cobalt_thread *thread;
+	xnticks_t tslice;
+	int ret, n;
+	spl_t s;
+
+	thread = xnmalloc(sizeof(*thread));
+	if (thread == NULL)
+		return -EAGAIN;
+
+	tslice = cobalt_time_slice;
+	sched_class = cobalt_sched_policy_param(&param, policy,
+						param_ex, &tslice);
+	if (sched_class == NULL) {
+		xnfree(thread);
+		return -EINVAL;
+	}
+
+	iattr.name = task->comm;
+	iattr.flags = XNUSER|XNFPU;
+	iattr.personality = &cobalt_personality;
+	iattr.affinity = CPU_MASK_ALL;
+	ret = xnthread_init(&thread->threadbase, &iattr, sched_class, &param);
+	if (ret) {
+		xnfree(thread);
+		return ret;
+	}
+
+	thread->magic = COBALT_THREAD_MAGIC;
+	xnsynch_init(&thread->monitor_synch, XNSYNCH_FIFO, NULL);
+
+	xnsynch_init(&thread->sigwait, XNSYNCH_FIFO, NULL);
+	sigemptyset(&thread->sigpending);
+	for (n = 0; n < _NSIG; n++)
+		INIT_LIST_HEAD(thread->sigqueues + n);
+
+	xnthread_set_slice(&thread->threadbase, tslice);
+	cobalt_set_extref(&thread->extref, NULL, NULL);
+
+	/*
+	 * We need an anonymous registry entry to obtain a handle for
+	 * fast mutex locking.
+	 */
+	ret = xnthread_register(&thread->threadbase, "");
+	if (ret) {
+		xnsynch_destroy(&thread->monitor_synch);
+		xnsynch_destroy(&thread->sigwait);
+		__xnthread_discard(&thread->threadbase);
+		xnfree(thread);
+		return ret;
+	}
+
+	xnlock_get_irqsave(&nklock, s);
+	list_add_tail(&thread->next, process ? &process->thread_list
+					     : &cobalt_global_thread_list);
+	xnlock_put_irqrestore(&nklock, s);
+
+	thread->hkey.u_pth = 0;
+	thread->hkey.mm = NULL;
+
+	*thread_p = thread;
+
+	return 0;
+}
+
+static void pthread_discard(struct cobalt_thread *thread)
+{
+	spl_t s;
+
+	xnsynch_destroy(&thread->monitor_synch);
+	xnsynch_destroy(&thread->sigwait);
+
+	xnlock_get_irqsave(&nklock, s);
+	list_del(&thread->next);
+	xnlock_put_irqrestore(&nklock, s);
+	__xnthread_discard(&thread->threadbase);
+	xnfree(thread);
+}
+
+static inline int pthread_setmode_np(int clrmask, int setmask, int *mode_r)
+{
+	const int valid_flags = XNLOCK|XNWARN|XNTRAPLB;
+	int old;
+
+	/*
+	 * The conforming mode bit is actually zero, since jumping to
+	 * this code entailed switching to primary mode already.
+	 */
+	if ((clrmask & ~valid_flags) != 0 || (setmask & ~valid_flags) != 0)
+		return -EINVAL;
+
+	old = xnthread_set_mode(clrmask, setmask);
+	if (mode_r)
+		*mode_r = old;
+
+	if ((clrmask & ~setmask) & XNLOCK)
+		/* Reschedule if the scheduler has been unlocked. */
+		xnsched_run();
+
+	return 0;
+}
+
+static struct cobalt_thread *thread_lookup_or_shadow(unsigned long pth,
+						     __u32 __user *u_winoff,
+						     int *promoted_r)
+{
+	struct cobalt_local_hkey hkey;
+	struct cobalt_thread *thread;
+
+	*promoted_r = 0;
+
+	hkey.u_pth = pth;
+	hkey.mm = current->mm;
+
+	thread = thread_lookup(&hkey);
+	if (thread == NULL) {
+		if (u_winoff == NULL)
+			return ERR_PTR(-ESRCH);
+
+		thread = cobalt_thread_shadow(&hkey, u_winoff);
+		if (!IS_ERR(thread))
+			*promoted_r = 1;
+	}
+
+	return thread;
+}
+
+int cobalt_thread_setschedparam_ex(unsigned long pth,
+				   int policy,
+				   const struct sched_param_ex *param_ex,
+				   __u32 __user *u_winoff,
+				   int __user *u_promoted)
+{
+	struct cobalt_thread *thread;
+	int ret, promoted;
+
+	trace_cobalt_pthread_setschedparam(pth, policy, param_ex);
+
+	thread = thread_lookup_or_shadow(pth, u_winoff, &promoted);
+	if (IS_ERR(thread))
+		return PTR_ERR(thread);
+
+	ret = __cobalt_thread_setschedparam_ex(thread, policy, param_ex);
+	if (ret)
+		return ret;
+
+	return cobalt_copy_to_user(u_promoted, &promoted, sizeof(promoted));
+}
+
+COBALT_SYSCALL(thread_setschedparam_ex, conforming,
+	       (unsigned long pth,
+		int policy,
+		const struct sched_param_ex __user *u_param,
+		__u32 __user *u_winoff,
+		int __user *u_promoted))
+{
+	struct sched_param_ex param_ex;
+
+	if (cobalt_copy_from_user(&param_ex, u_param, sizeof(param_ex)))
+		return -EFAULT;
+
+	return cobalt_thread_setschedparam_ex(pth, policy, &param_ex,
+					      u_winoff, u_promoted);
+}
+
+int cobalt_thread_getschedparam_ex(unsigned long pth,
+				   int *policy_r,
+				   struct sched_param_ex *param_ex)
+{
+	struct cobalt_local_hkey hkey;
+	struct cobalt_thread *thread;
+	int ret;
+
+	hkey.u_pth = pth;
+	hkey.mm = current->mm;
+	thread = thread_lookup(&hkey);
+	if (thread == NULL)
+		return -ESRCH;
+
+	ret = __cobalt_thread_getschedparam_ex(thread, policy_r, param_ex);
+	if (ret)
+		return ret;
+
+	trace_cobalt_pthread_getschedparam(pth, *policy_r, param_ex);
+
+	return 0;
+}
+
+COBALT_SYSCALL(thread_getschedparam_ex, current,
+	       (unsigned long pth,
+		int __user *u_policy,
+		struct sched_param_ex __user *u_param))
+{
+	struct sched_param_ex param_ex;
+	int ret, policy;
+
+	ret = cobalt_thread_getschedparam_ex(pth, &policy, &param_ex);
+	if (ret)
+		return ret;
+
+	ret = cobalt_copy_to_user(u_policy, &policy, sizeof(policy));
+	if (ret)
+		return ret;
+
+	return cobalt_copy_to_user(u_param, &param_ex, sizeof(param_ex));
+}
+
+int cobalt_thread_setschedprio(unsigned long pth,
+			       int prio,
+			       __u32 __user *u_winoff,
+			       int __user *u_promoted)
+{
+	struct sched_param_ex param_ex;
+	struct cobalt_thread *thread;
+	int ret, policy, promoted;
+
+	trace_cobalt_pthread_setschedprio(pth, prio);
+
+	thread = thread_lookup_or_shadow(pth, u_winoff, &promoted);
+	if (IS_ERR(thread))
+		return PTR_ERR(thread);
+
+	ret = __cobalt_thread_getschedparam_ex(thread, &policy, &param_ex);
+	if (ret)
+		return ret;
+
+	param_ex.sched_priority = prio;
+
+	ret = __cobalt_thread_setschedparam_ex(thread, policy, &param_ex);
+	if (ret)
+		return ret;
+
+	return cobalt_copy_to_user(u_promoted, &promoted, sizeof(promoted));
+}
+
+COBALT_SYSCALL(thread_setschedprio, conforming,
+	       (unsigned long pth,
+		int prio,
+		__u32 __user *u_winoff,
+		int __user *u_promoted))
+{
+	return cobalt_thread_setschedprio(pth, prio, u_winoff, u_promoted);
+}
+
+int __cobalt_thread_create(unsigned long pth, int policy,
+			   struct sched_param_ex *param_ex,
+			   int xid, __u32 __user *u_winoff)
+{
+	struct cobalt_thread *thread = NULL;
+	struct task_struct *p = current;
+	struct cobalt_local_hkey hkey;
+	int ret;
+
+	trace_cobalt_pthread_create(pth, policy, param_ex);
+
+	/*
+	 * We have been passed the pthread_t identifier the user-space
+	 * Cobalt library has assigned to our caller; we'll index our
+	 * internal pthread_t descriptor in kernel space on it.
+	 */
+	hkey.u_pth = pth;
+	hkey.mm = p->mm;
+
+	ret = pthread_create(&thread, policy, param_ex, p);
+	if (ret)
+		return ret;
+
+	ret = cobalt_map_user(&thread->threadbase, u_winoff);
+	if (ret) {
+		pthread_discard(thread);
+		return ret;
+	}
+
+	if (!thread_hash(&hkey, thread, task_pid_vnr(p))) {
+		ret = -EAGAIN;
+		goto fail;
+	}
+
+	thread->hkey = hkey;
+
+	if (xid > 0 && cobalt_push_personality(xid) == NULL) {
+		ret = -EINVAL;
+		goto fail;
+	}
+
+	return xnthread_harden();
+fail:
+	xnthread_cancel(&thread->threadbase);
+
+	return ret;
+}
+
+COBALT_SYSCALL(thread_create, init,
+	       (unsigned long pth, int policy,
+		struct sched_param_ex __user *u_param,
+		int xid,
+		__u32 __user *u_winoff))
+{
+	struct sched_param_ex param_ex;
+	int ret;
+
+	ret = cobalt_copy_from_user(&param_ex, u_param, sizeof(param_ex));
+	if (ret)
+		return ret;
+
+	return __cobalt_thread_create(pth, policy, &param_ex, xid, u_winoff);
+}
+
+struct cobalt_thread *
+cobalt_thread_shadow(struct cobalt_local_hkey *hkey,
+		     __u32 __user *u_winoff)
+{
+	struct cobalt_thread *thread = NULL;
+	struct sched_param_ex param_ex;
+	int ret;
+
+	if (xnthread_current())
+		return ERR_PTR(-EBUSY);
+
+	param_ex.sched_priority = 0;
+	trace_cobalt_pthread_create(hkey->u_pth, SCHED_NORMAL, &param_ex);
+	ret = pthread_create(&thread, SCHED_NORMAL, &param_ex, current);
+	if (ret)
+		return ERR_PTR(ret);
+
+	ret = cobalt_map_user(&thread->threadbase, u_winoff);
+	if (ret) {
+		pthread_discard(thread);
+		return ERR_PTR(ret);
+	}
+
+	if (!thread_hash(hkey, thread, task_pid_vnr(current))) {
+		ret = -EAGAIN;
+		goto fail;
+	}
+
+	thread->hkey = *hkey;
+
+	xnthread_harden();
+
+	return thread;
+fail:
+	xnthread_cancel(&thread->threadbase);
+
+	return ERR_PTR(ret);
+}
+
+COBALT_SYSCALL(thread_setmode, primary,
+	       (int clrmask, int setmask, int __user *u_mode_r))
+{
+	int ret, old;
+
+	trace_cobalt_pthread_setmode(clrmask, setmask);
+
+	ret = pthread_setmode_np(clrmask, setmask, &old);
+	if (ret)
+		return ret;
+
+	if (u_mode_r && cobalt_copy_to_user(u_mode_r, &old, sizeof(old)))
+		return -EFAULT;
+
+	return 0;
+}
+
+COBALT_SYSCALL(thread_setname, current,
+	       (unsigned long pth, const char __user *u_name))
+{
+	struct cobalt_local_hkey hkey;
+	struct cobalt_thread *thread;
+	char name[XNOBJECT_NAME_LEN];
+	struct task_struct *p;
+	spl_t s;
+
+	if (cobalt_strncpy_from_user(name, u_name,
+				     sizeof(name) - 1) < 0)
+		return -EFAULT;
+
+	name[sizeof(name) - 1] = '\0';
+	hkey.u_pth = pth;
+	hkey.mm = current->mm;
+
+	trace_cobalt_pthread_setname(pth, name);
+
+	xnlock_get_irqsave(&nklock, s);
+
+	thread = thread_lookup(&hkey);
+	if (thread == NULL) {
+		xnlock_put_irqrestore(&nklock, s);
+		return -ESRCH;
+	}
+
+	ksformat(thread->threadbase.name,
+		 XNOBJECT_NAME_LEN - 1, "%s", name);
+	p = xnthread_host_task(&thread->threadbase);
+	get_task_struct(p);
+
+	xnlock_put_irqrestore(&nklock, s);
+
+	knamecpy(p->comm, name);
+	put_task_struct(p);
+
+	return 0;
+}
+
+COBALT_SYSCALL(thread_kill, conforming,
+	       (unsigned long pth, int sig))
+{
+	struct cobalt_local_hkey hkey;
+	struct cobalt_thread *thread;
+	int ret;
+	spl_t s;
+
+	trace_cobalt_pthread_kill(pth, sig);
+
+	xnlock_get_irqsave(&nklock, s);
+
+	hkey.u_pth = pth;
+	hkey.mm = current->mm;
+	thread = thread_lookup(&hkey);
+	if (thread == NULL)
+		ret = -ESRCH;
+	else
+		ret = __cobalt_kill(thread, sig, 0);
+
+	xnlock_put_irqrestore(&nklock, s);
+
+	return ret;
+}
+
+COBALT_SYSCALL(thread_join, primary, (unsigned long pth))
+{
+	struct cobalt_local_hkey hkey;
+	struct cobalt_thread *thread;
+	spl_t s;
+
+	trace_cobalt_pthread_join(pth);
+
+	xnlock_get_irqsave(&nklock, s);
+
+	hkey.u_pth = pth;
+	hkey.mm = current->mm;
+	thread = thread_lookup(&hkey);
+
+	xnlock_put_irqrestore(&nklock, s);
+
+	if (thread == NULL)
+		return -ESRCH;
+
+	return xnthread_join(&thread->threadbase, false);
+}
+
+COBALT_SYSCALL(thread_getpid, current, (unsigned long pth))
+{
+	struct cobalt_local_hkey hkey;
+	struct cobalt_thread *thread;
+	pid_t pid;
+	spl_t s;
+
+	trace_cobalt_pthread_pid(pth);
+
+	xnlock_get_irqsave(&nklock, s);
+
+	hkey.u_pth = pth;
+	hkey.mm = current->mm;
+	thread = thread_lookup(&hkey);
+	if (thread == NULL)
+		pid = -ESRCH;
+	else
+		pid = xnthread_host_pid(&thread->threadbase);
+
+	xnlock_put_irqrestore(&nklock, s);
+
+	return pid;
+}
+
+COBALT_SYSCALL(thread_getstat, current,
+	       (pid_t pid, struct cobalt_threadstat __user *u_stat))
+{
+	struct cobalt_threadstat stat;
+	struct cobalt_thread *p;
+	struct xnthread *thread;
+	xnticks_t xtime;
+	spl_t s;
+
+	trace_cobalt_pthread_stat(pid);
+
+	if (pid == 0) {
+		thread = xnthread_current();
+		if (thread == NULL)
+			return -EPERM;
+		xnlock_get_irqsave(&nklock, s);
+	} else {
+		xnlock_get_irqsave(&nklock, s);
+		p = cobalt_thread_find(pid);
+		if (p == NULL) {
+			xnlock_put_irqrestore(&nklock, s);
+			return -ESRCH;
+		}
+		thread = &p->threadbase;
+	}
+
+	/* We have to hold the nklock to keep most values consistent. */
+	stat.cpu = xnsched_cpu(thread->sched);
+	stat.cprio = xnthread_current_priority(thread);
+	xtime = xnstat_exectime_get_total(&thread->stat.account);
+	if (thread->sched->curr == thread)
+		xtime += xnstat_exectime_now() -
+			xnstat_exectime_get_last_switch(thread->sched);
+	stat.xtime = xnclock_ticks_to_ns(&nkclock, xtime);
+	stat.msw = xnstat_counter_get(&thread->stat.ssw);
+	stat.csw = xnstat_counter_get(&thread->stat.csw);
+	stat.xsc = xnstat_counter_get(&thread->stat.xsc);
+	stat.pf = xnstat_counter_get(&thread->stat.pf);
+	stat.status = xnthread_get_state(thread);
+	if (thread->lock_count > 0)
+		stat.status |= XNLOCK;
+	stat.timeout = xnthread_get_timeout(thread,
+					    xnclock_read_monotonic(&nkclock));
+	strcpy(stat.name, thread->name);
+	strcpy(stat.personality, thread->personality->name);
+	xnlock_put_irqrestore(&nklock, s);
+
+	return cobalt_copy_to_user(u_stat, &stat, sizeof(stat));
+}
+
+#ifdef CONFIG_XENO_OPT_COBALT_EXTENSION
+
+int cobalt_thread_extend(struct cobalt_extension *ext,
+			 void *priv)
+{
+	struct cobalt_thread *thread = cobalt_current_thread();
+	struct xnthread_personality *prev;
+
+	trace_cobalt_pthread_extend(thread->hkey.u_pth, ext->core.name);
+
+	prev = cobalt_push_personality(ext->core.xid);
+	if (prev == NULL)
+		return -EINVAL;
+
+	cobalt_set_extref(&thread->extref, ext, priv);
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(cobalt_thread_extend);
+
+void cobalt_thread_restrict(void)
+{
+	struct cobalt_thread *thread = cobalt_current_thread();
+
+	trace_cobalt_pthread_restrict(thread->hkey.u_pth,
+		      thread->threadbase.personality->name);
+	cobalt_pop_personality(&cobalt_personality);
+	cobalt_set_extref(&thread->extref, NULL, NULL);
+}
+EXPORT_SYMBOL_GPL(cobalt_thread_restrict);
+
+#endif /* !CONFIG_XENO_OPT_COBALT_EXTENSION */
+
+const char *cobalt_trace_parse_sched_params(struct trace_seq *p, int policy,
+					    struct sched_param_ex *params)
+{
+	const char *ret = trace_seq_buffer_ptr(p);
+
+	switch (policy) {
+	case SCHED_QUOTA:
+		trace_seq_printf(p, "priority=%d, group=%d",
+				 params->sched_priority,
+				 params->sched_quota_group);
+		break;
+	case SCHED_TP:
+		trace_seq_printf(p, "priority=%d, partition=%d",
+				 params->sched_priority,
+				 params->sched_tp_partition);
+		break;
+	case SCHED_NORMAL:
+		break;
+	case SCHED_SPORADIC:
+		trace_seq_printf(p, "priority=%d, low_priority=%d, "
+				 "budget=(%ld.%09ld), period=(%ld.%09ld), "
+				 "maxrepl=%d",
+				 params->sched_priority,
+				 params->sched_ss_low_priority,
+				 params->sched_ss_init_budget.tv_sec,
+				 params->sched_ss_init_budget.tv_nsec,
+				 params->sched_ss_repl_period.tv_sec,
+				 params->sched_ss_repl_period.tv_nsec,
+				 params->sched_ss_max_repl);
+		break;
+	case SCHED_RR:
+	case SCHED_FIFO:
+	case SCHED_COBALT:
+	case SCHED_WEAK:
+	default:
+		trace_seq_printf(p, "priority=%d", params->sched_priority);
+		break;
+	}
+	trace_seq_putc(p, '\0');
+
+	return ret;
+}
+++ linux-patched/kernel/xenomai/posix/Makefile	2022-03-21 12:58:29.012892638 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/kernel/xenomai/posix/syscall.c	1970-01-01 01:00:00.000000000 +0100
+
+ccflags-y += -I$(srctree)/kernel
+
+obj-$(CONFIG_XENOMAI) += xenomai.o
+
+xenomai-y :=		\
+	clock.o		\
+	cond.o		\
+	corectl.o	\
+	event.o		\
+	io.o		\
+	memory.o	\
+	monitor.o	\
+	mqueue.o	\
+	mutex.o		\
+	nsem.o		\
+	process.o	\
+	sched.o		\
+	sem.o		\
+	signal.o	\
+	syscall.o	\
+	thread.o	\
+	timer.o		\
+	timerfd.o
+
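+# syscall_entries.h is generated by gen-syscall-entries.sh from the
+# sources in this directory, and is regenerated whenever any of them
+# changes.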
+syscall_entries := $(srctree)/$(src)/gen-syscall-entries.sh
+
+quiet_cmd_syscall_entries = GEN     $@
+      cmd_syscall_entries = $(CONFIG_SHELL) '$(syscall_entries)' $(filter-out FORCE,$^) > $@
+
+$(obj)/syscall_entries.h: $(syscall_entries) $(wildcard $(srctree)/$(src)/*.c) FORCE
+	$(call if_changed,syscall_entries)
+
+target += syscall_entries.h
+
+$(obj)/syscall.o: $(obj)/syscall_entries.h
+
+xenomai-$(CONFIG_XENO_ARCH_SYS3264) += compat.o syscall32.o
+++ linux-patched/kernel/xenomai/posix/syscall.c	2022-03-21 12:58:29.008892677 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/kernel/xenomai/posix/event.c	1970-01-01 01:00:00.000000000 +0100
+/*
+ * Copyright (C) 2005 Philippe Gerum <rpm@xenomai.org>
+ * Copyright (C) 2005 Gilles Chanteperdrix <gilles.chanteperdrix@xenomai.org>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.	 See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+#include <linux/types.h>
+#include <linux/err.h>
+#include <linux/sched.h>
+#include <linux/kconfig.h>
+#include <linux/unistd.h>
+#include <cobalt/uapi/corectl.h>
+#include <cobalt/kernel/tree.h>
+#include <cobalt/kernel/vdso.h>
+#include <cobalt/kernel/init.h>
+#include <pipeline/kevents.h>
+#include <pipeline/vdso_fallback.h>
+#include <asm/syscall.h>
+#include "internal.h"
+#include "thread.h"
+#include "sched.h"
+#include "mutex.h"
+#include "cond.h"
+#include "mqueue.h"
+#include "sem.h"
+#include "signal.h"
+#include "timer.h"
+#include "monitor.h"
+#include "clock.h"
+#include "event.h"
+#include "timerfd.h"
+#include "io.h"
+#include "corectl.h"
+#include "../debug.h"
+#include <trace/events/cobalt-posix.h>
+
+/* Syscall must run into the Linux domain. */
+#define __xn_exec_lostage    0x1
+/* Syscall must run into the Xenomai domain. */
+#define __xn_exec_histage    0x2
+/* Shadow syscall: caller must be mapped. */
+#define __xn_exec_shadow     0x4
+/* Switch back toggle; caller must return to its original mode. */
+#define __xn_exec_switchback 0x8
+/* Exec in current domain. */
+#define __xn_exec_current    0x10
+/* Exec in conforming domain, Xenomai or Linux. */
+#define __xn_exec_conforming 0x20
+/* Attempt syscall restart in the opposite domain upon -ENOSYS. */
+#define __xn_exec_adaptive   0x40
+/* Do not restart syscall upon signal receipt. */
+#define __xn_exec_norestart  0x80
+/* Shorthand for shadow init syscall. */
+#define __xn_exec_init       __xn_exec_lostage
+/* Shorthand for shadow syscall in Xenomai space. */
+#define __xn_exec_primary   (__xn_exec_shadow|__xn_exec_histage)
+/* Shorthand for shadow syscall in Linux space. */
+#define __xn_exec_secondary (__xn_exec_shadow|__xn_exec_lostage)
+/* Shorthand for syscall in Linux space with switchback if shadow. */
+#define __xn_exec_downup    (__xn_exec_lostage|__xn_exec_switchback)
+/* Shorthand for non-restartable primary syscall. */
+#define __xn_exec_nonrestartable (__xn_exec_primary|__xn_exec_norestart)
+/* Domain probing syscall starting in conforming mode. */
+#define __xn_exec_probing   (__xn_exec_conforming|__xn_exec_adaptive)
+/* Hand over mode selection to syscall.  */
+#define __xn_exec_handover  (__xn_exec_current|__xn_exec_adaptive)
+
+typedef long (*cobalt_syshand)(unsigned long arg1, unsigned long arg2,
+			       unsigned long arg3, unsigned long arg4,
+			       unsigned long arg5);
+
+static void prepare_for_signal(struct task_struct *p,
+			       struct xnthread *thread,
+			       struct pt_regs *regs,
+			       int sysflags)
+{
+	int notify = 0;
+	spl_t s;
+
+	xnlock_get_irqsave(&nklock, s);
+
+	if (xnthread_test_info(thread, XNKICKED)) {
+		if (signal_pending(p)) {
+			__xn_error_return(regs,
+					  (sysflags & __xn_exec_norestart) ?
+					  -EINTR : -ERESTARTSYS);
+			notify = !xnthread_test_state(thread, XNSSTEP);
+			xnthread_clear_info(thread, XNBREAK);
+		}
+		xnthread_clear_info(thread, XNKICKED);
+	}
+
+	xnlock_put_irqrestore(&nklock, s);
+
+	xnthread_test_cancel();
+
+	xnthread_relax(notify, SIGDEBUG_MIGRATE_SIGNAL);
+}
+
+static COBALT_SYSCALL(migrate, current, (int domain))
+{
+	struct xnthread *thread = xnthread_current();
+
+	if (is_secondary_domain()) {
+		if (domain == COBALT_PRIMARY) {
+			if (thread == NULL)
+				return -EPERM;
+			/*
+			 * Paranoid: a corner case where userland
+			 * fiddles with SIGSHADOW while the target
+			 * thread is still waiting to be started.
+			 */
+			if (xnthread_test_state(thread, XNDORMANT))
+				return 0;
+
+			return xnthread_harden() ? : 1;
+		}
+		return 0;
+	}
+
+	/* We are running on the head stage, apply relax request. */
+	if (domain == COBALT_SECONDARY) {
+		xnthread_relax(0, 0);
+		return 1;
+	}
+
+	return 0;
+}
+
+static COBALT_SYSCALL(trace, current,
+		      (int op, unsigned long a1,
+		       unsigned long a2, unsigned long a3))
+{
+	int ret = -EINVAL;
+
+	switch (op) {
+	case __xntrace_op_max_begin:
+		ret = xntrace_max_begin(a1);
+		break;
+
+	case __xntrace_op_max_end:
+		ret = xntrace_max_end(a1);
+		break;
+
+	case __xntrace_op_max_reset:
+		ret = xntrace_max_reset();
+		break;
+
+	case __xntrace_op_user_start:
+		ret = xntrace_user_start();
+		break;
+
+	case __xntrace_op_user_stop:
+		ret = xntrace_user_stop(a1);
+		break;
+
+	case __xntrace_op_user_freeze:
+		ret = xntrace_user_freeze(a1, a2);
+		break;
+
+	case __xntrace_op_special:
+		ret = xntrace_special(a1 & 0xFF, a2);
+		break;
+
+	case __xntrace_op_special_u64:
+		ret = xntrace_special_u64(a1 & 0xFF,
+					  (((u64) a2) << 32) | a3);
+		break;
+
+	case __xntrace_op_latpeak_freeze:
+		xntrace_latpeak_freeze(a1);
+		ret = 0;
+		break;
+
+	}
+	return ret;
+}
+
+static COBALT_SYSCALL(ftrace_puts, current,
+		      (const char __user *str))
+{
+	char buf[256];
+	ssize_t len;
+
+	len = cobalt_strncpy_from_user(buf, str, sizeof(buf));
+	if (len < 0)
+		return -EFAULT;
+
+#ifdef CONFIG_TRACING
+	__trace_puts(_THIS_IP_, buf, len);
+#endif
+
+	return 0;
+}
+
+static COBALT_SYSCALL(archcall, current,
+		      (unsigned long a1, unsigned long a2,
+		       unsigned long a3, unsigned long a4,
+		       unsigned long a5))
+{
+	return xnarch_local_syscall(a1, a2, a3, a4, a5);
+}
+
+static COBALT_SYSCALL(get_current, current,
+		      (xnhandle_t __user *u_handle))
+{
+	struct xnthread *cur = xnthread_current();
+
+	if (cur == NULL)
+		return -EPERM;
+
+	return cobalt_copy_to_user(u_handle, &cur->handle,
+				      sizeof(*u_handle));
+}
+
+static COBALT_SYSCALL(backtrace, lostage,
+		      (int nr, unsigned long __user *u_backtrace, int reason))
+{
+	unsigned long backtrace[SIGSHADOW_BACKTRACE_DEPTH];
+	int ret;
+
+	/*
+	 * Bail out early in case backtrace() in userland is broken or
+	 * fails; we may still want to know about this in kernel space
+	 * for future use.
+	 */
+	if (nr <= 0)
+		return 0;
+	/*
+	 * We may omit the older frames if we can't store the full
+	 * backtrace.
+	 */
+	if (nr > SIGSHADOW_BACKTRACE_DEPTH)
+		nr = SIGSHADOW_BACKTRACE_DEPTH;
+	/*
+	 * Fetch the backtrace array, filled with PC values as seen
+	 * from the relaxing thread in user-space; a copy error is
+	 * simply propagated back to the caller.
+	 */
+	ret = cobalt_copy_from_user(backtrace, u_backtrace, nr * sizeof(long));
+	if (ret)
+		return ret;
+
+	xndebug_trace_relax(nr, backtrace, reason);
+
+	return 0;
+}
+
+static COBALT_SYSCALL(serialdbg, current,
+		      (const char __user *u_msg, int len))
+{
+	char buf[128];
+	int n;
+
+	while (len > 0) {
+		n = len;
+		if (n > sizeof(buf))
+			n = sizeof(buf);
+		if (cobalt_copy_from_user(buf, u_msg, n))
+			return -EFAULT;
+		raw_printk("%.*s", n, buf);
+		u_msg += n;
+		len -= n;
+	}
+
+	return 0;
+}
+
+static void stringify_feature_set(unsigned long fset, char *buf, int size)
+{
+	unsigned long feature;
+	int nc, nfeat;
+
+	*buf = '\0';
+
+	for (feature = 1, nc = nfeat = 0; fset != 0 && size > 0; feature <<= 1) {
+		if (fset & feature) {
+			nc = ksformat(buf, size, "%s%s",
+				      nfeat > 0 ? " " : "",
+				      get_feature_label(feature));
+			nfeat++;
+			size -= nc;
+			buf += nc;
+			fset &= ~feature;
+		}
+	}
+}
+
+static COBALT_SYSCALL(bind, lostage,
+		      (struct cobalt_bindreq __user *u_breq))
+{
+	unsigned long featreq, featmis;
+	struct cobalt_bindreq breq;
+	struct cobalt_featinfo *f;
+	int abirev;
+
+	if (cobalt_copy_from_user(&breq, u_breq, sizeof(breq)))
+		return -EFAULT;
+
+	f = &breq.feat_ret;
+	featreq = breq.feat_req;
+	if (!realtime_core_running() && (featreq & __xn_feat_control) == 0)
+		return -EAGAIN;
+
+	/*
+	 * Calculate the missing feature set:
+	 * kernel_unavailable_set & user_mandatory_set.
+	 */
+	featmis = (~XENOMAI_FEAT_DEP & (featreq & XENOMAI_FEAT_MAN));
+	abirev = breq.abi_rev;
+
+	/*
+	 * Pass back the supported feature set and the ABI revision
+	 * level to user-space.
+	 */
+	f->feat_all = XENOMAI_FEAT_DEP;
+	stringify_feature_set(XENOMAI_FEAT_DEP, f->feat_all_s,
+			      sizeof(f->feat_all_s));
+	f->feat_man = featreq & XENOMAI_FEAT_MAN;
+	stringify_feature_set(f->feat_man, f->feat_man_s,
+			      sizeof(f->feat_man_s));
+	f->feat_mis = featmis;
+	stringify_feature_set(featmis, f->feat_mis_s,
+			      sizeof(f->feat_mis_s));
+	f->feat_req = featreq;
+	stringify_feature_set(featreq, f->feat_req_s,
+			      sizeof(f->feat_req_s));
+	f->feat_abirev = XENOMAI_ABI_REV;
+	collect_arch_features(f);
+
+	pipeline_collect_features(f);
+	f->vdso_offset = cobalt_umm_offset(&cobalt_ppd_get(1)->umm, nkvdso);
+
+	if (cobalt_copy_to_user(u_breq, &breq, sizeof(breq)))
+		return -EFAULT;
+
+	/*
+	 * If some mandatory features the user-space code relies on
+	 * are missing at kernel level, we cannot go further.
+	 */
+	if (featmis)
+		return -EINVAL;
+
+	if (!check_abi_revision(abirev))
+		return -ENOEXEC;
+
+	return cobalt_bind_core(featreq);
+}
+
+static COBALT_SYSCALL(extend, lostage, (unsigned int magic))
+{
+	return cobalt_bind_personality(magic);
+}
+
+static int CoBaLt_ni(void)
+{
+	return -ENOSYS;
+}
+
+/*
+ * We have a single syscall table for all ABI models (i.e. 64bit
+ * native + 32bit emulation, or plain 32bit).
+ *
+ * The syscall table is set up in a single step, based on three
+ * subsequent sources of initializers:
+ *
+ * - first, all syscall entries are defaulted to a placeholder
+ * returning -ENOSYS (__COBALT_CALL_NI), as the table may be sparse.
+ *
+ * - then __COBALT_CALL_ENTRY() produces a native call entry
+ * (e.g. pure 64bit call handler for a 64bit architecture, 32bit
+ * handler for a 32bit architecture), optionally followed by a set of
+ * 32bit syscall entries offset by an arch-specific base index, which
+ * default to the native calls. These nitty-gritty details are defined
+ * by <asm/xenomai/syscall32.h>. 32bit architectures - or 64bit ones
+ * for which we don't support any 32bit ABI model - will simply define
+ * __COBALT_CALL32_ENTRY() as an empty macro.
+ *
+ * - finally, 32bit thunk entries are generated by including
+ * <asm/xenomai/syscall32-table.h>, overriding the default handlers
+ * installed during the previous step.
+ *
+ * For instance, with CONFIG_IA32_EMULATION support enabled in an
+ * x86_64 kernel, sc_cobalt_mq_timedreceive would appear twice in the
+ * table, as:
+ *
+ * [sc_cobalt_mq_timedreceive] = CoBaLt_mq_timedreceive,
+ * ...
+ * [sc_cobalt_mq_timedreceive + __COBALT_IA32_BASE] = CoBaLt32emu_mq_timedreceive,
+ *
+ * CoBaLt32emu_mq_timedreceive() would do the required thunking for
+ * dealing with the 32<->64bit conversion of arguments. On the other
+ * hand, sc_cobalt_sched_yield - which does not require any thunk -
+ * would also appear twice, but both entries would point at the native
+ * syscall implementation:
+ *
+ * [sc_cobalt_sched_yield] = CoBaLt_sched_yield,
+ * ...
+ * [sc_cobalt_sched_yield + __COBALT_IA32_BASE] = CoBaLt_sched_yield,
+ *
+ * Accordingly, applications targeting the ia32 model issue syscalls
+ * in the range [__COBALT_IA32_BASE..__COBALT_IA32_BASE +
+ * __NR_COBALT_SYSCALLS-1], whilst native (32/64bit) ones issue
+ * syscalls in the range [0..__NR_COBALT_SYSCALLS-1].
+ *
+ * In short, this is an incremental process where the arch-specific
+ * code can override the 32bit syscall entries, pointing at the thunk
+ * routines it may need for handing 32bit calls over to their respective
+ * 64bit implementation.
+ *
+ * By convention, there is NO pure 32bit syscall, which means that
+ * each 32bit syscall defined by a compat ABI interface MUST match a
+ * native (64bit) syscall. This is important as we share the call
+ * modes (i.e. __xn_exec_ bits) between all ABI models.
+ *
+ * --rpm
+ */
+#define __syshand__(__name)	((cobalt_syshand)(CoBaLt_ ## __name))
+
+#define __COBALT_NI	__syshand__(ni)
+
+#define __COBALT_CALL_NI				\
+	[0 ... __NR_COBALT_SYSCALLS-1] = __COBALT_NI,	\
+	__COBALT_CALL32_INITHAND(__COBALT_NI)
+
+#define __COBALT_CALL_NFLAGS				\
+	[0 ... __NR_COBALT_SYSCALLS-1] = 0,		\
+	__COBALT_CALL32_INITMODE(0)
+
+#define __COBALT_CALL_ENTRY(__name)				\
+	[sc_cobalt_ ## __name] = __syshand__(__name),		\
+	__COBALT_CALL32_ENTRY(__name, __syshand__(__name))
+
+#define __COBALT_MODE(__name, __mode)	\
+	[sc_cobalt_ ## __name] = __xn_exec_##__mode,
+
+#ifdef CONFIG_XENO_ARCH_SYS3264
+#include "syscall32.h"
+#endif
+
+#include "syscall_entries.h"
+
+static const cobalt_syshand cobalt_syscalls[] = {
+	__COBALT_CALL_NI
+	__COBALT_CALL_ENTRIES
+#ifdef CONFIG_XENO_ARCH_SYS3264
+#include <asm/xenomai/syscall32-table.h>
+#endif
+};
+
+static const int cobalt_sysmodes[] = {
+	__COBALT_CALL_NFLAGS
+	__COBALT_CALL_MODES
+};
+
+static inline int allowed_syscall(struct cobalt_process *process,
+				  struct xnthread *thread,
+				  int sysflags, int nr)
+{
+	if (nr == sc_cobalt_bind)
+		return 1;
+
+	if (process == NULL)
+		return 0;
+
+	if (thread == NULL && (sysflags & __xn_exec_shadow))
+		return 0;
+
+	return cap_raised(current_cap(), CAP_SYS_NICE);
+}
+
+int handle_head_syscall(bool caller_is_relaxed, struct pt_regs *regs)
+{
+	struct cobalt_process *process;
+	int switched, sigs, sysflags;
+	struct xnthread *thread;
+	cobalt_syshand handler;
+	struct task_struct *p;
+	unsigned long args[6];
+	unsigned int nr, code;
+	long ret;
+
+	if (!__xn_syscall_p(regs))
+		goto linux_syscall;
+
+	thread = xnthread_current();
+	code = __xn_syscall(regs);
+	if (code >= ARRAY_SIZE(cobalt_syscalls))
+		goto bad_syscall;
+
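+	/*
+	 * Strip any compat ABI base offset from the syscall code:
+	 * cobalt_sysmodes[] is indexed by the native syscall number,
+	 * while cobalt_syscalls[] is indexed by the full code.
+	 */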
+	nr = code & (__NR_COBALT_SYSCALLS - 1);
+
+	trace_cobalt_head_sysentry(code);
+
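+	/*
+	 * If no Cobalt process is bound to this context yet, try
+	 * looking it up by the caller's memory map and cache the
+	 * result.
+	 */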
+	process = cobalt_current_process();
+	if (process == NULL) {
+		process = cobalt_search_process(current->mm);
+		cobalt_set_process(process);
+	}
+
+	handler = cobalt_syscalls[code];
+	sysflags = cobalt_sysmodes[nr];
+
+	/*
+	 * Executing Cobalt services requires CAP_SYS_NICE, except for
+	 * sc_cobalt_bind which does its own checks.
+	 */
+	if (unlikely(!allowed_syscall(process, thread, sysflags, nr))) {
+		/*
+		 * Exclude get_current from reporting, it is used to probe the
+		 * execution context.
+		 */
+		if (XENO_DEBUG(COBALT) && nr != sc_cobalt_get_current)
+			printk(XENO_WARNING
+			       "syscall <%d> denied to %s[%d]\n",
+			       nr, current->comm, task_pid_nr(current));
+		__xn_error_return(regs, -EPERM);
+		goto ret_handled;
+	}
+
+	if (sysflags & __xn_exec_conforming)
+		/*
+		 * If the conforming exec bit is set, turn the exec
+		 * bitmask for the syscall into the most appropriate
+		 * setup for the caller, i.e. Xenomai domain for
+		 * shadow threads, Linux otherwise.
+		 */
+		sysflags |= (thread ? __xn_exec_histage : __xn_exec_lostage);
+
+	/*
+	 * Here we have to dispatch the syscall execution properly,
+	 * depending on:
+	 *
+	 * o Whether the syscall must be run into the Linux or Xenomai
+	 * domain, or indifferently in the current Xenomai domain.
+	 *
+	 * o Whether the caller currently runs in the Linux or Xenomai
+	 * domain.
+	 */
+restart:
+	/*
+	 * Process adaptive syscalls by restarting them in the
+	 * opposite domain upon receiving -ENOSYS from the syscall
+	 * handler.
+	 */
+	switched = 0;
+	if (sysflags & __xn_exec_lostage) {
+		/*
+		 * The syscall must run from the Linux domain.
+		 */
+		if (!caller_is_relaxed) {
+			/*
+			 * Request originates from the Xenomai domain:
+			 * relax the caller then invoke the syscall
+			 * handler right after.
+			 */
+			xnthread_relax(1, SIGDEBUG_MIGRATE_SYSCALL);
+			switched = 1;
+		} else
+			/*
+			 * Request originates from the Linux domain:
+			 * propagate the event to our Linux-based
+			 * handler, so that the syscall is executed
+			 * from there.
+			 */
+			return KEVENT_PROPAGATE;
+	} else if (sysflags & (__xn_exec_histage | __xn_exec_current)) {
+		/*
+		 * Syscall must run either from the Xenomai domain, or
+		 * from the calling domain.
+		 *
+		 * If the request originates from the Linux domain,
+		 * hand it over to our secondary-mode dispatcher.
+		 * Otherwise, invoke the syscall handler immediately.
+		 */
+		if (caller_is_relaxed)
+			return KEVENT_PROPAGATE;
+	}
+
+	/*
+	 * 'thread' has to be valid from this point: every syscall a
+	 * regular (non-shadow) thread may issue has either been
+	 * pipelined to the root handler (lostage ones) or rejected by
+	 * allowed_syscall().
+	 */
+
+	p = current;
+	pipeline_get_syscall_args(p, regs, args);
+
+	ret = handler(args[0], args[1], args[2], args[3], args[4]);
+	if (ret == -ENOSYS && (sysflags & __xn_exec_adaptive)) {
+		if (switched) {
+			ret = xnthread_harden();
+			if (ret) {
+				switched = 0;
+				goto done;
+			}
+		} else /* Mark the primary -> secondary transition. */
+			xnthread_set_localinfo(thread, XNDESCENT);
+		sysflags ^=
+		    (__xn_exec_lostage | __xn_exec_histage |
+		     __xn_exec_adaptive);
+		goto restart;
+	}
+done:
+	__xn_status_return(regs, ret);
+	sigs = 0;
+	if (!xnsched_root_p()) {
+		if (signal_pending(p) ||
+		    xnthread_test_info(thread, XNKICKED)) {
+			sigs = 1;
+			prepare_for_signal(p, thread, regs, sysflags);
+		} else if (xnthread_test_state(thread, XNWEAK) &&
+			   thread->res_count == 0) {
+			if (switched)
+				switched = 0;
+			else
+				xnthread_relax(0, 0);
+		}
+	}
+	if (!sigs && (sysflags & __xn_exec_switchback) && switched)
+		/* -EPERM will be trapped later if needed. */
+		xnthread_harden();
+
+ret_handled:
+	/* Update the stats and userland-visible state. */
+	if (thread) {
+		xnthread_clear_localinfo(thread, XNDESCENT);
+		xnstat_counter_inc(&thread->stat.xsc);
+		xnthread_sync_window(thread);
+	}
+
+	trace_cobalt_head_sysexit(__xn_reg_rval(regs));
+
+	return KEVENT_STOP;
+
+linux_syscall:
+	if (xnsched_root_p())
+		/*
+		 * The call originates from the Linux domain, either
+		 * from a relaxed shadow or from a regular Linux task;
+		 * just propagate the event so that we will fall back
+		 * to handle_root_syscall().
+		 */
+		return KEVENT_PROPAGATE;
+
+	if (!__xn_rootcall_p(regs, &code))
+		goto bad_syscall;
+
+	if (pipeline_handle_vdso_fallback(code, regs))
+		return KEVENT_STOP;
+
+	/*
+	 * We know this is a Cobalt thread since it runs over the head
+	 * domain, however the current syscall should be handled by
+	 * the host kernel instead.  Before this happens, we have to
+	 * re-enter the root domain.
+	 */
+	xnthread_relax(1, SIGDEBUG_MIGRATE_SYSCALL);
+
+	return KEVENT_PROPAGATE;
+
+bad_syscall:
+	printk(XENO_WARNING "bad syscall <%#x>\n", code);
+
+	__xn_error_return(regs, -ENOSYS);
+
+	return KEVENT_STOP;
+}
+
+int handle_root_syscall(struct pt_regs *regs)
+{
+	int sysflags, switched, sigs;
+	struct xnthread *thread;
+	cobalt_syshand handler;
+	struct task_struct *p;
+	unsigned long args[6];
+	unsigned int nr, code;
+	long ret;
+
+	/*
+	 * Catch cancellation requests pending for user shadows
+	 * running mostly in secondary mode, i.e. XNWEAK. In that
+	 * case, we won't run prepare_for_signal() that frequently, so
+	 * check for cancellation here.
+	 */
+	xnthread_test_cancel();
+
+	if (!__xn_syscall_p(regs))
+		/* Fall back to Linux syscall handling. */
+		return KEVENT_PROPAGATE;
+
+	thread = xnthread_current();
+	/* code has already been checked in the head domain handler. */
+	code = __xn_syscall(regs);
+	nr = code & (__NR_COBALT_SYSCALLS - 1);
+
+	trace_cobalt_root_sysentry(code);
+
+	/* Processing a Xenomai syscall. */
+
+	handler = cobalt_syscalls[code];
+	sysflags = cobalt_sysmodes[nr];
+
+	if (thread && (sysflags & __xn_exec_conforming))
+		sysflags |= __xn_exec_histage;
+restart:
+	/*
+	 * Process adaptive syscalls by restarting them in the
+	 * opposite domain upon receiving -ENOSYS from the syscall
+	 * handler.
+	 */
+	switched = 0;
+	if (sysflags & __xn_exec_histage) {
+		/*
+		 * This request originates from the Linux domain but
+		 * should run into the Xenomai domain: harden the
+		 * caller before invoking the syscall handler.
+		 */
+		ret = xnthread_harden();
+		if (ret) {
+			__xn_error_return(regs, ret);
+			goto ret_handled;
+		}
+		switched = 1;
+	} else {
+		/*
+		 * We want to run the syscall in the current Linux
+		 * domain. This is a slow path, so proceed with any
+		 * pending schedparam update on the fly.
+		 */
+		if (thread)
+			xnthread_propagate_schedparam(thread);
+	}
+
+	p = current;
+	pipeline_get_syscall_args(p, regs, args);
+
+	ret = handler(args[0], args[1], args[2], args[3], args[4]);
+	if (ret == -ENOSYS && (sysflags & __xn_exec_adaptive)) {
+		sysflags ^= __xn_exec_histage;
+		if (switched) {
+			xnthread_relax(1, SIGDEBUG_MIGRATE_SYSCALL);
+			sysflags &= ~__xn_exec_adaptive;
+			 /* Mark the primary -> secondary transition. */
+			xnthread_set_localinfo(thread, XNDESCENT);
+		}
+		goto restart;
+	}
+
+	__xn_status_return(regs, ret);
+
+	sigs = 0;
+	if (!xnsched_root_p()) {
+		/*
+		 * We may have gained a shadow TCB from the syscall we
+		 * just invoked, so make sure to fetch it.
+		 */
+		thread = xnthread_current();
+		if (signal_pending(p)) {
+			sigs = 1;
+			prepare_for_signal(p, thread, regs, sysflags);
+		} else if (xnthread_test_state(thread, XNWEAK) &&
+			   thread->res_count == 0)
+			sysflags |= __xn_exec_switchback;
+	}
+	if (!sigs && (sysflags & __xn_exec_switchback)
+	    && (switched || xnsched_primary_p()))
+		xnthread_relax(0, 0);
+
+ret_handled:
+	/* Update the stats and userland-visible state. */
+	if (thread) {
+		xnthread_clear_localinfo(thread, XNDESCENT|XNHICCUP);
+		xnstat_counter_inc(&thread->stat.xsc);
+		xnthread_sync_window(thread);
+	}
+
+	trace_cobalt_root_sysexit(__xn_reg_rval(regs));
+
+	return KEVENT_STOP;
+}
+
+long cobalt_restart_syscall_placeholder(struct restart_block *param)
+{
+	return -EINVAL;
+}
+++ linux-patched/kernel/xenomai/posix/event.c	2022-03-21 12:58:29.005892706 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/kernel/xenomai/posix/compat.c	1970-01-01 01:00:00.000000000 +0100
+/*
+ * Copyright (C) 2012 Philippe Gerum <rpm@xenomai.org>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+#include "internal.h"
+#include "thread.h"
+#include "clock.h"
+#include "event.h"
+#include <trace/events/cobalt-posix.h>
+#include <cobalt/kernel/time.h>
+
+/*
+ * Cobalt event notification services
+ *
+ * An event flag group is a synchronization object represented by a
+ * regular native integer; every available bit in such word can be
+ * used to map a user-defined event flag.  When a flag is set, the
+ * associated event is said to have occurred.
+ *
+ * Xenomai threads and interrupt handlers can use event flags to
+ * signal the occurrence of events to other threads; those threads can
+ * either wait for the events to occur in a conjunctive manner (all
+ * awaited events must have occurred to wake up), or in a disjunctive
+ * way (at least one of the awaited events must have occurred to wake
+ * up).
+ *
+ * We expose this non-POSIX feature through the internal API, as a
+ * fast IPC mechanism available to the Copperplate interface.
+ */
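+
+/*
+ * For illustration (arbitrary values): with the group value at 0x05,
+ * a waiter asking for bits 0x06 is satisfied immediately in
+ * disjunctive mode since bit 2 is already set, but blocks in
+ * conjunctive mode until bit 1 is set as well.
+ */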
+
+struct event_wait_context {
+	struct xnthread_wait_context wc;
+	unsigned int value;
+	int mode;
+};
+
+COBALT_SYSCALL(event_init, current,
+	       (struct cobalt_event_shadow __user *u_event,
+		unsigned int value, int flags))
+{
+	struct cobalt_event_shadow shadow;
+	struct cobalt_event_state *state;
+	int pshared, synflags, ret;
+	struct cobalt_event *event;
+	struct cobalt_umm *umm;
+	unsigned long stateoff;
+	spl_t s;
+
+	trace_cobalt_event_init(u_event, value, flags);
+
+	event = xnmalloc(sizeof(*event));
+	if (event == NULL)
+		return -ENOMEM;
+
+	pshared = (flags & COBALT_EVENT_SHARED) != 0;
+	umm = &cobalt_ppd_get(pshared)->umm;
+	state = cobalt_umm_alloc(umm, sizeof(*state));
+	if (state == NULL) {
+		xnfree(event);
+		return -EAGAIN;
+	}
+
+	ret = xnregistry_enter_anon(event, &event->resnode.handle);
+	if (ret) {
+		cobalt_umm_free(umm, state);
+		xnfree(event);
+		return ret;
+	}
+
+	event->state = state;
+	event->flags = flags;
+	synflags = (flags & COBALT_EVENT_PRIO) ? XNSYNCH_PRIO : XNSYNCH_FIFO;
+	xnsynch_init(&event->synch, synflags, NULL);
+	state->value = value;
+	state->flags = 0;
+	state->nwaiters = 0;
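+	/*
+	 * The state block lives in a heap userland maps, so flags can
+	 * be posted and tested from user space without entering the
+	 * kernel; hand its offset back through the shadow block.
+	 */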
+	stateoff = cobalt_umm_offset(umm, state);
+	XENO_BUG_ON(COBALT, stateoff != (__u32)stateoff);
+
+	xnlock_get_irqsave(&nklock, s);
+	cobalt_add_resource(&event->resnode, event, pshared);
+	event->magic = COBALT_EVENT_MAGIC;
+	xnlock_put_irqrestore(&nklock, s);
+
+	shadow.flags = flags;
+	shadow.handle = event->resnode.handle;
+	shadow.state_offset = (__u32)stateoff;
+
+	return cobalt_copy_to_user(u_event, &shadow, sizeof(*u_event));
+}
+
+int __cobalt_event_wait(struct cobalt_event_shadow __user *u_event,
+			unsigned int bits,
+			unsigned int __user *u_bits_r,
+			int mode, const struct timespec64 *ts)
+{
+	unsigned int rbits = 0, testval;
+	xnticks_t timeout = XN_INFINITE;
+	struct cobalt_event_state *state;
+	xntmode_t tmode = XN_RELATIVE;
+	struct event_wait_context ewc;
+	struct cobalt_event *event;
+	xnhandle_t handle;
+	int ret = 0, info;
+	spl_t s;
+
+	handle = cobalt_get_handle_from_user(&u_event->handle);
+
+	if (ts) {
+		if (!timespec64_valid(ts))
+			return -EINVAL;
+
+		timeout = ts2ns(ts);
+		if (timeout) {
+			timeout++;
+			tmode = XN_ABSOLUTE;
+		} else
+			timeout = XN_NONBLOCK;
+		trace_cobalt_event_timedwait(u_event, bits, mode, ts);
+	} else
+		trace_cobalt_event_wait(u_event, bits, mode);
+
+	xnlock_get_irqsave(&nklock, s);
+
+	event = xnregistry_lookup(handle, NULL);
+	if (event == NULL || event->magic != COBALT_EVENT_MAGIC) {
+		ret = -EINVAL;
+		goto out;
+	}
+
+	state = event->state;
+
+	if (bits == 0) {
+		/*
+		 * Special case: we don't wait for any event; we only
+		 * return the current flag group value.
+		 */
+		rbits = state->value;
+		goto out;
+	}
+
+	state->flags |= COBALT_EVENT_PENDED;
+	rbits = state->value & bits;
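+	/*
+	 * With COBALT_EVENT_ANY, any of the requested bits satisfies
+	 * the wait; otherwise all of them must be set at once.
+	 */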
+	testval = mode & COBALT_EVENT_ANY ? rbits : bits;
+	if (rbits && rbits == testval)
+		goto done;
+
+	if (timeout == XN_NONBLOCK) {
+		ret = -EWOULDBLOCK;
+		goto done;
+	}
+
+	ewc.value = bits;
+	ewc.mode = mode;
+	xnthread_prepare_wait(&ewc.wc);
+	state->nwaiters++;
+	info = xnsynch_sleep_on(&event->synch, timeout, tmode);
+	if (info & XNRMID) {
+		ret = -EIDRM;
+		goto out;
+	}
+	if (info & (XNBREAK|XNTIMEO)) {
+		state->nwaiters--;
+		ret = (info & XNBREAK) ? -EINTR : -ETIMEDOUT;
+	} else
+		rbits = ewc.value;
+done:
+	if (!xnsynch_pended_p(&event->synch))
+		state->flags &= ~COBALT_EVENT_PENDED;
+out:
+	xnlock_put_irqrestore(&nklock, s);
+
+	if (ret == 0 &&
+	    cobalt_copy_to_user(u_bits_r, &rbits, sizeof(rbits)))
+		return -EFAULT;
+
+	return ret;
+}
+
+int __cobalt_event_wait64(struct cobalt_event_shadow __user *u_event,
+			  unsigned int bits,
+			  unsigned int __user *u_bits_r,
+			  int mode, const struct __kernel_timespec __user *u_ts)
+{
+	struct timespec64 ts, *tsp = NULL;
+	int ret;
+
+	if (u_ts) {
+		tsp = &ts;
+		ret = cobalt_get_timespec64(&ts, u_ts);
+		if (ret)
+			return ret;
+	}
+
+	return __cobalt_event_wait(u_event, bits, u_bits_r, mode, tsp);
+}
+
+COBALT_SYSCALL(event_wait, primary,
+	       (struct cobalt_event_shadow __user *u_event,
+		unsigned int bits,
+		unsigned int __user *u_bits_r,
+		int mode, const struct __user_old_timespec __user *u_ts))
+{
+	struct timespec64 ts, *tsp = NULL;
+	int ret;
+
+	if (u_ts) {
+		tsp = &ts;
+		ret = cobalt_get_u_timespec(&ts, u_ts);
+		if (ret)
+			return ret;
+	}
+
+	return __cobalt_event_wait(u_event, bits, u_bits_r, mode, tsp);
+}
+
+COBALT_SYSCALL(event_wait64, primary,
+	       (struct cobalt_event_shadow __user *u_event,
+		unsigned int bits,
+		unsigned int __user *u_bits_r,
+		int mode, const struct __kernel_timespec __user *u_ts))
+{
+	return __cobalt_event_wait64(u_event, bits, u_bits_r, mode, u_ts);
+}
+
+COBALT_SYSCALL(event_sync, current,
+	       (struct cobalt_event_shadow __user *u_event))
+{
+	unsigned int bits, waitval, testval;
+	struct xnthread_wait_context *wc;
+	struct cobalt_event_state *state;
+	struct event_wait_context *ewc;
+	struct cobalt_event *event;
+	struct xnthread *p, *tmp;
+	xnhandle_t handle;
+	int ret = 0;
+	spl_t s;
+
+	handle = cobalt_get_handle_from_user(&u_event->handle);
+
+	xnlock_get_irqsave(&nklock, s);
+
+	event = xnregistry_lookup(handle, NULL);
+	if (event == NULL || event->magic != COBALT_EVENT_MAGIC) {
+		ret = -EINVAL;
+		goto out;
+	}
+
+	/*
+	 * Userland has already updated the bitmask; our job is to
+	 * wake up any thread which could be satisfied by its current
+	 * value.
+	 */
+	state = event->state;
+	bits = state->value;
+
+	xnsynch_for_each_sleeper_safe(p, tmp, &event->synch) {
+		wc = xnthread_get_wait_context(p);
+		ewc = container_of(wc, struct event_wait_context, wc);
+		waitval = ewc->value & bits;
+		testval = ewc->mode & COBALT_EVENT_ANY ? waitval : ewc->value;
+		if (waitval && waitval == testval) {
+			state->nwaiters--;
+			ewc->value = waitval;
+			xnsynch_wakeup_this_sleeper(&event->synch, p);
+		}
+	}
+
+	xnsched_run();
+out:
+	xnlock_put_irqrestore(&nklock, s);
+
+	return ret;
+}
+
+COBALT_SYSCALL(event_destroy, current,
+	       (struct cobalt_event_shadow __user *u_event))
+{
+	struct cobalt_event *event;
+	xnhandle_t handle;
+	spl_t s;
+
+	trace_cobalt_event_destroy(u_event);
+
+	handle = cobalt_get_handle_from_user(&u_event->handle);
+
+	xnlock_get_irqsave(&nklock, s);
+
+	event = xnregistry_lookup(handle, NULL);
+	if (event == NULL || event->magic != COBALT_EVENT_MAGIC) {
+		xnlock_put_irqrestore(&nklock, s);
+		return -EINVAL;
+	}
+
+	cobalt_event_reclaim(&event->resnode, s); /* drops lock */
+
+	return 0;
+}
+
+COBALT_SYSCALL(event_inquire, current,
+	       (struct cobalt_event_shadow __user *u_event,
+		struct cobalt_event_info __user *u_info,
+		pid_t __user *u_waitlist,
+		size_t waitsz))
+{
+	int nrpend = 0, nrwait = 0, nrpids, ret = 0;
+	unsigned long pstamp, nstamp = 0;
+	struct cobalt_event_info info;
+	struct cobalt_event *event;
+	pid_t *t = NULL, fbuf[16];
+	struct xnthread *thread;
+	xnhandle_t handle;
+	spl_t s;
+
+	handle = cobalt_get_handle_from_user(&u_event->handle);
+
+	nrpids = waitsz / sizeof(pid_t);
+
+	xnlock_get_irqsave(&nklock, s);
+
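+	/*
+	 * The registry stamp changes whenever the handle is rebound,
+	 * so a mismatch after dropping the lock for the allocation
+	 * means our snapshot went stale and must be re-sampled.
+	 */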
+	for (;;) {
+		pstamp = nstamp;
+		event = xnregistry_lookup(handle, &nstamp);
+		if (event == NULL || event->magic != COBALT_EVENT_MAGIC) {
+			xnlock_put_irqrestore(&nklock, s);
+			return -EINVAL;
+		}
+		/*
+		 * Allocate memory to return the wait list without
+		 * holding any lock, then revalidate the handle.
+		 */
+		if (t == NULL) {
+			nrpend = 0;
+			if (!xnsynch_pended_p(&event->synch))
+				break;
+			xnsynch_for_each_sleeper(thread, &event->synch)
+				nrpend++;
+			if (u_waitlist == NULL)
+				break;
+			xnlock_put_irqrestore(&nklock, s);
+			if (nrpids > nrpend)
+				nrpids = nrpend;
+			if (nrpend <= ARRAY_SIZE(fbuf))
+				t = fbuf; /* Use fast buffer. */
+			else {
+				t = xnmalloc(nrpend * sizeof(pid_t));
+				if (t == NULL)
+					return -ENOMEM;
+			}
+			xnlock_get_irqsave(&nklock, s);
+		} else if (pstamp == nstamp)
+			break;
+		else {
+			xnlock_put_irqrestore(&nklock, s);
+			if (t != fbuf)
+				xnfree(t);
+			t = NULL;
+			xnlock_get_irqsave(&nklock, s);
+		}
+	}
+
+	info.flags = event->flags;
+	info.value = event->value;
+	info.nrwait = nrpend;
+
+	if (xnsynch_pended_p(&event->synch) && u_waitlist != NULL) {
+		xnsynch_for_each_sleeper(thread, &event->synch) {
+			if (nrwait >= nrpids)
+				break;
+			t[nrwait++] = xnthread_host_pid(thread);
+		}
+	}
+
+	xnlock_put_irqrestore(&nklock, s);
+
+	ret = cobalt_copy_to_user(u_info, &info, sizeof(info));
+	if (ret == 0 && nrwait > 0)
+		ret = cobalt_copy_to_user(u_waitlist, t, nrwait * sizeof(pid_t));
+
+	if (t && t != fbuf)
+		xnfree(t);
+
+	return ret ?: nrwait;
+}
+
+void cobalt_event_reclaim(struct cobalt_resnode *node, spl_t s)
+{
+	struct cobalt_event *event;
+	struct cobalt_umm *umm;
+	int pshared;
+
+	event = container_of(node, struct cobalt_event, resnode);
+	xnregistry_remove(node->handle);
+	cobalt_del_resource(node);
+	xnsynch_destroy(&event->synch);
+	pshared = (event->flags & COBALT_EVENT_SHARED) != 0;
+	xnlock_put_irqrestore(&nklock, s);
+
+	umm = &cobalt_ppd_get(pshared)->umm;
+	cobalt_umm_free(umm, event->state);
+	xnfree(event);
+}
+++ linux-patched/kernel/xenomai/posix/compat.c	2022-03-21 12:58:29.001892745 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/kernel/xenomai/posix/event.h	1970-01-01 01:00:00.000000000 +0100
+/*
+ * Copyright (C) 2014 Philippe Gerum <rpm@xenomai.org>
+ *
+ * Xenomai is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * Xenomai is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+#include <linux/err.h>
+#include <linux/memory.h>
+#include <linux/module.h>
+#include <cobalt/kernel/compat.h>
+#include <asm/xenomai/syscall.h>
+#include <xenomai/posix/mqueue.h>
+
+int sys32_get_timespec(struct timespec64 *ts,
+		       const struct old_timespec32 __user *u_cts)
+{
+	struct old_timespec32 cts;
+
+	if (u_cts == NULL || !access_rok(u_cts, sizeof(*u_cts)))
+		return -EFAULT;
+
+	if (__xn_get_user(cts.tv_sec, &u_cts->tv_sec) ||
+		__xn_get_user(cts.tv_nsec, &u_cts->tv_nsec))
+		return -EFAULT;
+
+	ts->tv_sec = cts.tv_sec;
+	ts->tv_nsec = cts.tv_nsec;
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(sys32_get_timespec);
+
+int sys32_put_timespec(struct old_timespec32 __user *u_cts,
+		       const struct timespec64 *ts)
+{
+	struct old_timespec32 cts;
+
+	if (u_cts == NULL || !access_wok(u_cts, sizeof(*u_cts)))
+		return -EFAULT;
+
+	cts.tv_sec = ts->tv_sec;
+	cts.tv_nsec = ts->tv_nsec;
+
+	if (__xn_put_user(cts.tv_sec, &u_cts->tv_sec) ||
+	    __xn_put_user(cts.tv_nsec, &u_cts->tv_nsec))
+		return -EFAULT;
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(sys32_put_timespec);
+
+int sys32_get_itimerspec(struct itimerspec64 *its,
+			 const struct old_itimerspec32 __user *cits)
+{
+	int ret = sys32_get_timespec(&its->it_value, &cits->it_value);
+
+	return ret ?: sys32_get_timespec(&its->it_interval, &cits->it_interval);
+}
+EXPORT_SYMBOL_GPL(sys32_get_itimerspec);
+
+int sys32_put_itimerspec(struct old_itimerspec32 __user *cits,
+			 const struct itimerspec64 *its)
+{
+	int ret = sys32_put_timespec(&cits->it_value, &its->it_value);
+
+	return ret ?: sys32_put_timespec(&cits->it_interval, &its->it_interval);
+}
+EXPORT_SYMBOL_GPL(sys32_put_itimerspec);
+
+int sys32_get_timeval(struct __kernel_old_timeval *tv,
+		      const struct old_timeval32 __user *ctv)
+{
+	return (ctv == NULL ||
+		!access_rok(ctv, sizeof(*ctv)) ||
+		__xn_get_user(tv->tv_sec, &ctv->tv_sec) ||
+		__xn_get_user(tv->tv_usec, &ctv->tv_usec)) ? -EFAULT : 0;
+}
+EXPORT_SYMBOL_GPL(sys32_get_timeval);
+
+int sys32_put_timeval(struct old_timeval32 __user *ctv,
+		      const struct __kernel_old_timeval *tv)
+{
+	return (ctv == NULL ||
+		!access_wok(ctv, sizeof(*ctv)) ||
+		__xn_put_user(tv->tv_sec, &ctv->tv_sec) ||
+		__xn_put_user(tv->tv_usec, &ctv->tv_usec)) ? -EFAULT : 0;
+}
+EXPORT_SYMBOL_GPL(sys32_put_timeval);
+
+int sys32_get_timex(struct __kernel_timex *tx,
+		    const struct old_timex32 __user *ctx)
+{
+	struct __kernel_old_timeval time;
+	int ret;
+
+	memset(tx, 0, sizeof(*tx));
+
+	ret = sys32_get_timeval(&time, &ctx->time);
+	if (ret)
+		return ret;
+
+	tx->time.tv_sec = time.tv_sec;
+	tx->time.tv_usec = time.tv_usec;
+
+	if (!access_rok(ctx, sizeof(*ctx)) ||
+	    __xn_get_user(tx->modes, &ctx->modes) ||
+	    __xn_get_user(tx->offset, &ctx->offset) ||
+	    __xn_get_user(tx->freq, &ctx->freq) ||
+	    __xn_get_user(tx->maxerror, &ctx->maxerror) ||
+	    __xn_get_user(tx->esterror, &ctx->esterror) ||
+	    __xn_get_user(tx->status, &ctx->status) ||
+	    __xn_get_user(tx->constant, &ctx->constant) ||
+	    __xn_get_user(tx->precision, &ctx->precision) ||
+	    __xn_get_user(tx->tolerance, &ctx->tolerance) ||
+	    __xn_get_user(tx->tick, &ctx->tick) ||
+	    __xn_get_user(tx->ppsfreq, &ctx->ppsfreq) ||
+	    __xn_get_user(tx->jitter, &ctx->jitter) ||
+	    __xn_get_user(tx->shift, &ctx->shift) ||
+	    __xn_get_user(tx->stabil, &ctx->stabil) ||
+	    __xn_get_user(tx->jitcnt, &ctx->jitcnt) ||
+	    __xn_get_user(tx->calcnt, &ctx->calcnt) ||
+	    __xn_get_user(tx->errcnt, &ctx->errcnt) ||
+	    __xn_get_user(tx->stbcnt, &ctx->stbcnt))
+		return -EFAULT;
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(sys32_get_timex);
+
+int sys32_put_timex(struct old_timex32 __user *ctx,
+		    const struct __kernel_timex *tx)
+{
+	struct __kernel_old_timeval time;
+	int ret;
+
+	time.tv_sec = tx->time.tv_sec;
+	time.tv_usec = tx->time.tv_usec;
+
+	ret = sys32_put_timeval(&ctx->time, &time);
+	if (ret)
+		return ret;
+
+	if (!access_wok(ctx, sizeof(*ctx)) ||
+	    __xn_put_user(tx->modes, &ctx->modes) ||
+	    __xn_put_user(tx->offset, &ctx->offset) ||
+	    __xn_put_user(tx->freq, &ctx->freq) ||
+	    __xn_put_user(tx->maxerror, &ctx->maxerror) ||
+	    __xn_put_user(tx->esterror, &ctx->esterror) ||
+	    __xn_put_user(tx->status, &ctx->status) ||
+	    __xn_put_user(tx->constant, &ctx->constant) ||
+	    __xn_put_user(tx->precision, &ctx->precision) ||
+	    __xn_put_user(tx->tolerance, &ctx->tolerance) ||
+	    __xn_put_user(tx->tick, &ctx->tick) ||
+	    __xn_put_user(tx->ppsfreq, &ctx->ppsfreq) ||
+	    __xn_put_user(tx->jitter, &ctx->jitter) ||
+	    __xn_put_user(tx->shift, &ctx->shift) ||
+	    __xn_put_user(tx->stabil, &ctx->stabil) ||
+	    __xn_put_user(tx->jitcnt, &ctx->jitcnt) ||
+	    __xn_put_user(tx->calcnt, &ctx->calcnt) ||
+	    __xn_put_user(tx->errcnt, &ctx->errcnt) ||
+	    __xn_put_user(tx->stbcnt, &ctx->stbcnt))
+		return -EFAULT;
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(sys32_put_timex);
+
+int sys32_get_fdset(fd_set *fds, const compat_fd_set __user *cfds,
+		    size_t cfdsize)
+{
+	int rdpos, wrpos, rdlim = cfdsize / sizeof(compat_ulong_t);
+
+	if (cfds == NULL || !access_rok(cfds, cfdsize))
+		return -EFAULT;
+
+	for (rdpos = 0, wrpos = 0; rdpos < rdlim; rdpos++, wrpos++)
+		if (__xn_get_user(fds->fds_bits[wrpos], cfds->fds_bits + rdpos))
+			return -EFAULT;
+
+	return 0;
+}
+
+int sys32_put_fdset(compat_fd_set __user *cfds, const fd_set *fds,
+		    size_t fdsize)
+{
+	int rdpos, wrpos, wrlim = fdsize / sizeof(long);
+
+	if (cfds == NULL || !access_wok(cfds, wrlim * sizeof(compat_ulong_t)))
+		return -EFAULT;
+
+	for (rdpos = 0, wrpos = 0; wrpos < wrlim; rdpos++, wrpos++)
+		if (__xn_put_user(fds->fds_bits[rdpos], cfds->fds_bits + wrpos))
+			return -EFAULT;
+
+	return 0;
+}
+
+int sys32_get_param_ex(int policy,
+		       struct sched_param_ex *p,
+		       const struct compat_sched_param_ex __user *u_cp)
+{
+	struct compat_sched_param_ex cpex;
+
+	if (u_cp == NULL || cobalt_copy_from_user(&cpex, u_cp, sizeof(cpex)))
+		return -EFAULT;
+
+	p->sched_priority = cpex.sched_priority;
+
+	switch (policy) {
+	case SCHED_SPORADIC:
+		p->sched_ss_low_priority = cpex.sched_ss_low_priority;
+		p->sched_ss_max_repl = cpex.sched_ss_max_repl;
+		p->sched_ss_repl_period.tv_sec = cpex.sched_ss_repl_period.tv_sec;
+		p->sched_ss_repl_period.tv_nsec = cpex.sched_ss_repl_period.tv_nsec;
+		p->sched_ss_init_budget.tv_sec = cpex.sched_ss_init_budget.tv_sec;
+		p->sched_ss_init_budget.tv_nsec = cpex.sched_ss_init_budget.tv_nsec;
+		break;
+	case SCHED_RR:
+		p->sched_rr_quantum.tv_sec = cpex.sched_rr_quantum.tv_sec;
+		p->sched_rr_quantum.tv_nsec = cpex.sched_rr_quantum.tv_nsec;
+		break;
+	case SCHED_TP:
+		p->sched_tp_partition = cpex.sched_tp_partition;
+		break;
+	case SCHED_QUOTA:
+		p->sched_quota_group = cpex.sched_quota_group;
+		break;
+	}
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(sys32_get_param_ex);
+
+int sys32_put_param_ex(int policy,
+		       struct compat_sched_param_ex __user *u_cp,
+		       const struct sched_param_ex *p)
+{
+	struct compat_sched_param_ex cpex;
+
+	if (u_cp == NULL)
+		return -EFAULT;
+
+	cpex.sched_priority = p->sched_priority;
+
+	switch (policy) {
+	case SCHED_SPORADIC:
+		cpex.sched_ss_low_priority = p->sched_ss_low_priority;
+		cpex.sched_ss_max_repl = p->sched_ss_max_repl;
+		cpex.sched_ss_repl_period.tv_sec = p->sched_ss_repl_period.tv_sec;
+		cpex.sched_ss_repl_period.tv_nsec = p->sched_ss_repl_period.tv_nsec;
+		cpex.sched_ss_init_budget.tv_sec = p->sched_ss_init_budget.tv_sec;
+		cpex.sched_ss_init_budget.tv_nsec = p->sched_ss_init_budget.tv_nsec;
+		break;
+	case SCHED_RR:
+		cpex.sched_rr_quantum.tv_sec = p->sched_rr_quantum.tv_sec;
+		cpex.sched_rr_quantum.tv_nsec = p->sched_rr_quantum.tv_nsec;
+		break;
+	case SCHED_TP:
+		cpex.sched_tp_partition = p->sched_tp_partition;
+		break;
+	case SCHED_QUOTA:
+		cpex.sched_quota_group = p->sched_quota_group;
+		break;
+	}
+
+	return cobalt_copy_to_user(u_cp, &cpex, sizeof(cpex));
+}
+EXPORT_SYMBOL_GPL(sys32_put_param_ex);
+
+int sys32_get_mqattr(struct mq_attr *ap,
+		     const struct compat_mq_attr __user *u_cap)
+{
+	struct compat_mq_attr cattr;
+
+	if (u_cap == NULL ||
+	    cobalt_copy_from_user(&cattr, u_cap, sizeof(cattr)))
+		return -EFAULT;
+
+	ap->mq_flags = cattr.mq_flags;
+	ap->mq_maxmsg = cattr.mq_maxmsg;
+	ap->mq_msgsize = cattr.mq_msgsize;
+	ap->mq_curmsgs = cattr.mq_curmsgs;
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(sys32_get_mqattr);
+
+int sys32_put_mqattr(struct compat_mq_attr __user *u_cap,
+		     const struct mq_attr *ap)
+{
+	struct compat_mq_attr cattr;
+
+	cattr.mq_flags = ap->mq_flags;
+	cattr.mq_maxmsg = ap->mq_maxmsg;
+	cattr.mq_msgsize = ap->mq_msgsize;
+	cattr.mq_curmsgs = ap->mq_curmsgs;
+
+	return u_cap == NULL ? -EFAULT :
+		cobalt_copy_to_user(u_cap, &cattr, sizeof(cattr));
+}
+EXPORT_SYMBOL_GPL(sys32_put_mqattr);
+
+int sys32_get_sigevent(struct sigevent *ev,
+		       const struct compat_sigevent *__user u_cev)
+{
+	struct compat_sigevent cev;
+	compat_int_t *cp;
+	int ret, *p;
+
+	if (u_cev == NULL)
+		return -EFAULT;
+
+	ret = cobalt_copy_from_user(&cev, u_cev, sizeof(cev));
+	if (ret)
+		return ret;
+
+	memset(ev, 0, sizeof(*ev));
+	ev->sigev_value.sival_ptr = compat_ptr(cev.sigev_value.sival_ptr);
+	ev->sigev_signo = cev.sigev_signo;
+	ev->sigev_notify = cev.sigev_notify;
+	/*
+	 * Extensions may define extra fields we don't know about in
+	 * the padding area, so we have to load it entirely.
+	 */
+	p = ev->_sigev_un._pad;
+	cp = cev._sigev_un._pad;
+	while (p < &ev->_sigev_un._pad[ARRAY_SIZE(ev->_sigev_un._pad)] &&
+	       cp < &cev._sigev_un._pad[ARRAY_SIZE(cev._sigev_un._pad)])
+		*p++ = *cp++;
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(sys32_get_sigevent);
+
+int sys32_get_sigset(sigset_t *set, const compat_sigset_t *u_cset)
+{
+#ifdef __BIG_ENDIAN
+	compat_sigset_t v;
+
+	if (cobalt_copy_from_user(&v, u_cset, sizeof(compat_sigset_t)))
+		return -EFAULT;
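+	/*
+	 * Rebuild each 64-bit sigset word from a pair of 32-bit
+	 * compat words; the cases below intentionally fall through.
+	 */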
+	switch (_NSIG_WORDS) {
+	case 4: set->sig[3] = v.sig[6] | (((long)v.sig[7]) << 32 );
+	case 3: set->sig[2] = v.sig[4] | (((long)v.sig[5]) << 32 );
+	case 2: set->sig[1] = v.sig[2] | (((long)v.sig[3]) << 32 );
+	case 1: set->sig[0] = v.sig[0] | (((long)v.sig[1]) << 32 );
+	}
+#else
+	if (cobalt_copy_from_user(set, u_cset, sizeof(compat_sigset_t)))
+		return -EFAULT;
+#endif
+	return 0;
+}
+EXPORT_SYMBOL_GPL(sys32_get_sigset);
+
+int sys32_put_sigset(compat_sigset_t *u_cset, const sigset_t *set)
+{
+#ifdef __BIG_ENDIAN
+	compat_sigset_t v;
+	switch (_NSIG_WORDS) {
+	case 4: v.sig[7] = (set->sig[3] >> 32); v.sig[6] = set->sig[3];
+	case 3: v.sig[5] = (set->sig[2] >> 32); v.sig[4] = set->sig[2];
+	case 2: v.sig[3] = (set->sig[1] >> 32); v.sig[2] = set->sig[1];
+	case 1: v.sig[1] = (set->sig[0] >> 32); v.sig[0] = set->sig[0];
+	}
+	return cobalt_copy_to_user(u_cset, &v, sizeof(*u_cset)) ? -EFAULT : 0;
+#else
+	return cobalt_copy_to_user(u_cset, set, sizeof(*u_cset)) ? -EFAULT : 0;
+#endif
+}
+EXPORT_SYMBOL_GPL(sys32_put_sigset);
+
+int sys32_get_sigval(union sigval *val, const union compat_sigval *u_cval)
+{
+	union compat_sigval cval;
+	int ret;
+
+	if (u_cval == NULL)
+		return -EFAULT;
+
+	ret = cobalt_copy_from_user(&cval, u_cval, sizeof(cval));
+	if (ret)
+		return ret;
+
+	val->sival_ptr = compat_ptr(cval.sival_ptr);
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(sys32_get_sigval);
+
+int sys32_put_siginfo(void __user *u_si, const struct siginfo *si,
+		      int overrun)
+{
+	struct compat_siginfo __user *u_p = u_si;
+	int ret;
+
+	if (u_p == NULL)
+		return -EFAULT;
+
+	ret = __xn_put_user(si->si_signo, &u_p->si_signo);
+	ret |= __xn_put_user(si->si_errno, &u_p->si_errno);
+	ret |= __xn_put_user(si->si_code, &u_p->si_code);
+
+	/*
+	 * Copy the generic/standard siginfo bits to userland.
+	 */
+	switch (si->si_code) {
+	case SI_TIMER:
+		ret |= __xn_put_user(si->si_tid, &u_p->si_tid);
+		ret |= __xn_put_user(ptr_to_compat(si->si_ptr), &u_p->si_ptr);
+		ret |= __xn_put_user(overrun, &u_p->si_overrun);
+		break;
+	case SI_QUEUE:
+	case SI_MESGQ:
+		ret |= __xn_put_user(ptr_to_compat(si->si_ptr), &u_p->si_ptr);
+		fallthrough;
+	case SI_USER:
+		ret |= __xn_put_user(si->si_pid, &u_p->si_pid);
+		ret |= __xn_put_user(si->si_uid, &u_p->si_uid);
+	}
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(sys32_put_siginfo);
+
+int sys32_get_msghdr(struct user_msghdr *msg,
+		     const struct compat_msghdr __user *u_cmsg)
+{
+	compat_uptr_t tmp1, tmp2, tmp3;
+
+	if (u_cmsg == NULL ||
+	    !access_rok(u_cmsg, sizeof(*u_cmsg)) ||
+	    __xn_get_user(tmp1, &u_cmsg->msg_name) ||
+	    __xn_get_user(msg->msg_namelen, &u_cmsg->msg_namelen) ||
+	    __xn_get_user(tmp2, &u_cmsg->msg_iov) ||
+	    __xn_get_user(msg->msg_iovlen, &u_cmsg->msg_iovlen) ||
+	    __xn_get_user(tmp3, &u_cmsg->msg_control) ||
+	    __xn_get_user(msg->msg_controllen, &u_cmsg->msg_controllen) ||
+	    __xn_get_user(msg->msg_flags, &u_cmsg->msg_flags))
+		return -EFAULT;
+
+	if (msg->msg_namelen > sizeof(struct sockaddr_storage))
+		msg->msg_namelen = sizeof(struct sockaddr_storage);
+
+	msg->msg_name = compat_ptr(tmp1);
+	msg->msg_iov = compat_ptr(tmp2);
+	msg->msg_control = compat_ptr(tmp3);
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(sys32_get_msghdr);
+
+int sys32_get_mmsghdr(struct mmsghdr *mmsg,
+		      const struct compat_mmsghdr __user *u_cmmsg)
+{
+	if (u_cmmsg == NULL ||
+	    !access_rok(u_cmmsg, sizeof(*u_cmmsg)) ||
+	    __xn_get_user(mmsg->msg_len, &u_cmmsg->msg_len))
+		return -EFAULT;
+
+	return sys32_get_msghdr(&mmsg->msg_hdr, &u_cmmsg->msg_hdr);
+}
+EXPORT_SYMBOL_GPL(sys32_get_mmsghdr);
+
+int sys32_put_msghdr(struct compat_msghdr __user *u_cmsg,
+		     const struct user_msghdr *msg)
+{
+	if (u_cmsg == NULL ||
+	    !access_wok(u_cmsg, sizeof(*u_cmsg)) ||
+	    __xn_put_user(ptr_to_compat(msg->msg_name), &u_cmsg->msg_name) ||
+	    __xn_put_user(msg->msg_namelen, &u_cmsg->msg_namelen) ||
+	    __xn_put_user(ptr_to_compat(msg->msg_iov), &u_cmsg->msg_iov) ||
+	    __xn_put_user(msg->msg_iovlen, &u_cmsg->msg_iovlen) ||
+	    __xn_put_user(ptr_to_compat(msg->msg_control), &u_cmsg->msg_control) ||
+	    __xn_put_user(msg->msg_controllen, &u_cmsg->msg_controllen) ||
+	    __xn_put_user(msg->msg_flags, &u_cmsg->msg_flags))
+		return -EFAULT;
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(sys32_put_msghdr);
+
+int sys32_put_mmsghdr(struct compat_mmsghdr __user *u_cmmsg,
+		     const struct mmsghdr *mmsg)
+{
+	if (u_cmmsg == NULL ||
+	    !access_wok(u_cmmsg, sizeof(*u_cmmsg)) ||
+	    __xn_put_user(mmsg->msg_len, &u_cmmsg->msg_len))
+		return -EFAULT;
+
+	return sys32_put_msghdr(&u_cmmsg->msg_hdr, &mmsg->msg_hdr);
+}
+EXPORT_SYMBOL_GPL(sys32_put_mmsghdr);
+
+int sys32_get_iovec(struct iovec *iov,
+		    const struct compat_iovec __user *u_ciov,
+		    int ciovlen)
+{
+	const struct compat_iovec __user *p;
+	struct compat_iovec ciov;
+	int ret, n;
+
+	for (n = 0, p = u_ciov; n < ciovlen; n++, p++) {
+		ret = cobalt_copy_from_user(&ciov, p, sizeof(ciov));
+		if (ret)
+			return ret;
+		iov[n].iov_base = compat_ptr(ciov.iov_base);
+		iov[n].iov_len = ciov.iov_len;
+	}
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(sys32_get_iovec);
+
+int sys32_put_iovec(struct compat_iovec __user *u_ciov,
+		    const struct iovec *iov,
+		    int iovlen)
+{
+	struct compat_iovec __user *p;
+	struct compat_iovec ciov;
+	int ret, n;
+
+	for (n = 0, p = u_ciov; n < iovlen; n++, p++) {
+		ciov.iov_base = ptr_to_compat(iov[n].iov_base);
+		ciov.iov_len = iov[n].iov_len;
+		ret = cobalt_copy_to_user(p, &ciov, sizeof(*p));
+		if (ret)
+			return ret;
+	}
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(sys32_put_iovec);
+++ linux-patched/kernel/xenomai/posix/event.h	2022-03-21 12:58:28.998892774 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/kernel/xenomai/posix/memory.c	1970-01-01 01:00:00.000000000 +0100
+/*
+ * Copyright (C) 2012 Philippe Gerum <rpm@xenomai.org>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+#ifndef _COBALT_POSIX_EVENT_H
+#define _COBALT_POSIX_EVENT_H
+
+#include <cobalt/kernel/synch.h>
+#include <cobalt/uapi/event.h>
+#include <xenomai/posix/syscall.h>
+#include <xenomai/posix/process.h>
+
+struct cobalt_resources;
+struct cobalt_process;
+
+struct cobalt_event {
+	unsigned int magic;
+	unsigned int value;
+	int flags;
+	struct xnsynch synch;
+	struct cobalt_event_state *state;
+	struct cobalt_resnode resnode;
+};
+
+int __cobalt_event_wait(struct cobalt_event_shadow __user *u_event,
+			unsigned int bits,
+			unsigned int __user *u_bits_r,
+			int mode, const struct timespec64 *ts);
+
+int __cobalt_event_wait64(struct cobalt_event_shadow __user *u_event,
+			  unsigned int bits, unsigned int __user *u_bits_r,
+			  int mode,
+			  const struct __kernel_timespec __user *u_ts);
+
+COBALT_SYSCALL_DECL(event_init,
+		    (struct cobalt_event_shadow __user *u_evtsh,
+		     unsigned int value,
+		     int flags));
+
+COBALT_SYSCALL_DECL(event_wait,
+		    (struct cobalt_event_shadow __user *u_evtsh,
+		     unsigned int bits,
+		     unsigned int __user *u_bits_r,
+		     int mode,
+		     const struct __user_old_timespec __user *u_ts));
+
+COBALT_SYSCALL_DECL(event_wait64,
+		    (struct cobalt_event_shadow __user *u_evtsh,
+		     unsigned int bits,
+		     unsigned int __user *u_bits_r,
+		     int mode,
+		     const struct __kernel_timespec __user *u_ts));
+
+COBALT_SYSCALL_DECL(event_sync,
+		    (struct cobalt_event_shadow __user *u_evtsh));
+
+COBALT_SYSCALL_DECL(event_destroy,
+		    (struct cobalt_event_shadow __user *u_evtsh));
+
+COBALT_SYSCALL_DECL(event_inquire,
+		    (struct cobalt_event_shadow __user *u_event,
+		     struct cobalt_event_info __user *u_info,
+		     pid_t __user *u_waitlist,
+		     size_t waitsz));
+
+void cobalt_event_reclaim(struct cobalt_resnode *node,
+			  spl_t s);
+
+#endif /* !_COBALT_POSIX_EVENT_H */
+++ linux-patched/kernel/xenomai/posix/memory.c	2022-03-21 12:58:28.995892803 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/kernel/xenomai/posix/syscall32.c	1970-01-01 01:00:00.000000000 +0100
+/*
+ * This file is part of the Xenomai project.
+ *
+ * Copyright (C) 2014 Philippe Gerum <rpm@xenomai.org>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/mm.h>
+#include <linux/gfp.h>
+#include <linux/vmalloc.h>
+#include <rtdm/driver.h>
+#include <cobalt/kernel/vdso.h>
+#include "process.h"
+#include "memory.h"
+
+#define UMM_PRIVATE  0	/* Per-process user-mapped memory heap */
+#define UMM_SHARED   1	/* Shared user-mapped memory heap */
+#define SYS_GLOBAL   2	/* System heap (not mmapped) */
+
+struct xnvdso *nkvdso;
+EXPORT_SYMBOL_GPL(nkvdso);
+
+static void umm_vmopen(struct vm_area_struct *vma)
+{
+	struct cobalt_umm *umm = vma->vm_private_data;
+
+	atomic_inc(&umm->refcount);
+}
+
+static void umm_vmclose(struct vm_area_struct *vma)
+{
+	struct cobalt_umm *umm = vma->vm_private_data;
+
+	cobalt_umm_destroy(umm);
+}
+
+static struct vm_operations_struct umm_vmops = {
+	.open = umm_vmopen,
+	.close = umm_vmclose,
+};
+
+static struct cobalt_umm *umm_from_fd(struct rtdm_fd *fd)
+{
+	struct cobalt_process *process;
+
+	process = cobalt_current_process();
+	if (process == NULL)
+		return NULL;
+
+	if (rtdm_fd_minor(fd) == UMM_PRIVATE)
+		return &process->sys_ppd.umm;
+
+	return &cobalt_kernel_ppd.umm;
+}
+
+static int umm_mmap(struct rtdm_fd *fd, struct vm_area_struct *vma)
+{
+	struct cobalt_umm *umm;
+	size_t len;
+	int ret;
+
+	umm = umm_from_fd(fd);
+	if (umm == NULL)
+		return -ENODEV;
+
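+	/* The mapping must span the whole heap, no partial windows. */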
+	len = vma->vm_end - vma->vm_start;
+	if (len != xnheap_get_size(&umm->heap))
+		return -EINVAL;
+
+	vma->vm_private_data = umm;
+	vma->vm_ops = &umm_vmops;
+	if (xnarch_cache_aliasing())
+		vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
+
+	ret = rtdm_mmap_vmem(vma, xnheap_get_membase(&umm->heap));
+	if (ret)
+		return ret;
+
+	atomic_inc(&umm->refcount);
+
+	return 0;
+}
+
+#ifndef CONFIG_MMU
+static unsigned long umm_get_unmapped_area(struct rtdm_fd *fd,
+					   unsigned long len,
+					   unsigned long pgoff,
+					   unsigned long flags)
+{
+	struct cobalt_umm *umm;
+
+	umm = umm_from_fd(fd);
+	if (umm == NULL)
+		return -ENODEV;
+
+	if (pgoff == 0)
+		return (unsigned long)xnheap_get_membase(&umm->heap);
+
+	return pgoff << PAGE_SHIFT;
+}
+#else
+#define umm_get_unmapped_area	NULL
+#endif
+
+static int stat_umm(struct rtdm_fd *fd,
+		    struct cobalt_umm __user *u_stat)
+{
+	struct cobalt_memdev_stat stat;
+	struct cobalt_umm *umm;
+	spl_t s;
+
+	umm = umm_from_fd(fd);
+	if (umm == NULL)
+		return -ENODEV;
+
+	xnlock_get_irqsave(&umm->heap.lock, s);
+	stat.size = xnheap_get_size(&umm->heap);
+	stat.free = xnheap_get_free(&umm->heap);
+	xnlock_put_irqrestore(&umm->heap.lock, s);
+
+	return rtdm_safe_copy_to_user(fd, u_stat, &stat, sizeof(stat));
+}
+
+static int do_umm_ioctls(struct rtdm_fd *fd,
+			 unsigned int request, void __user *arg)
+{
+	int ret;
+
+	switch (request) {
+	case MEMDEV_RTIOC_STAT:
+		ret = stat_umm(fd, arg);
+		break;
+	default:
+		ret = -EINVAL;
+	}
+
+	return ret;
+}
+
+static int umm_ioctl_rt(struct rtdm_fd *fd,
+			unsigned int request, void __user *arg)
+{
+	return do_umm_ioctls(fd, request, arg);
+}
+
+static int umm_ioctl_nrt(struct rtdm_fd *fd,
+			 unsigned int request, void __user *arg)
+{
+	return do_umm_ioctls(fd, request, arg);
+}
+
+static int sysmem_open(struct rtdm_fd *fd, int oflags)
+{
+	if ((oflags & O_ACCMODE) != O_RDONLY)
+		return -EACCES;
+
+	return 0;
+}
+
+static int do_sysmem_ioctls(struct rtdm_fd *fd,
+			    unsigned int request, void __user *arg)
+{
+	struct cobalt_memdev_stat stat;
+	spl_t s;
+	int ret;
+
+	switch (request) {
+	case MEMDEV_RTIOC_STAT:
+		xnlock_get_irqsave(&cobalt_heap.lock, s);
+		stat.size = xnheap_get_size(&cobalt_heap);
+		stat.free = xnheap_get_free(&cobalt_heap);
+		xnlock_put_irqrestore(&cobalt_heap.lock, s);
+		ret = rtdm_safe_copy_to_user(fd, arg, &stat, sizeof(stat));
+		break;
+	default:
+		ret = -EINVAL;
+	}
+
+	return ret;
+}
+
+static int sysmem_ioctl_rt(struct rtdm_fd *fd,
+			   unsigned int request, void __user *arg)
+{
+	return do_sysmem_ioctls(fd, request, arg);
+}
+
+static int sysmem_ioctl_nrt(struct rtdm_fd *fd,
+			   unsigned int request, void __user *arg)
+{
+	return do_sysmem_ioctls(fd, request, arg);
+}
+
+static struct rtdm_driver umm_driver = {
+	.profile_info	=	RTDM_PROFILE_INFO(umm,
+						  RTDM_CLASS_MEMORY,
+						  RTDM_SUBCLASS_GENERIC,
+						  0),
+	.device_flags	=	RTDM_NAMED_DEVICE|RTDM_FIXED_MINOR,
+	.device_count	=	2,
+	.ops = {
+		.ioctl_rt		=	umm_ioctl_rt,
+		.ioctl_nrt		=	umm_ioctl_nrt,
+		.mmap			=	umm_mmap,
+		.get_unmapped_area	=	umm_get_unmapped_area,
+	},
+};
+
+static struct rtdm_device umm_devices[] = {
+	[ UMM_PRIVATE ] = {
+		.driver = &umm_driver,
+		.label = COBALT_MEMDEV_PRIVATE,
+		.minor = UMM_PRIVATE,
+	},
+	[ UMM_SHARED ] = {
+		.driver = &umm_driver,
+		.label = COBALT_MEMDEV_SHARED,
+		.minor = UMM_SHARED,
+	},
+};
+
+static struct rtdm_driver sysmem_driver = {
+	.profile_info	=	RTDM_PROFILE_INFO(sysmem,
+						  RTDM_CLASS_MEMORY,
+						  SYS_GLOBAL,
+						  0),
+	.device_flags	=	RTDM_NAMED_DEVICE,
+	.device_count	=	1,
+	.ops = {
+		.open		=	sysmem_open,
+		.ioctl_rt	=	sysmem_ioctl_rt,
+		.ioctl_nrt	=	sysmem_ioctl_nrt,
+	},
+};
+
+static struct rtdm_device sysmem_device = {
+	.driver = &sysmem_driver,
+	.label = COBALT_MEMDEV_SYS,
+};
+
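+/*
+ * The Cobalt vDSO is carved out of the shared UMM heap, so any
+ * process mapping the shared memory device can reach it.
+ */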
+static inline void init_vdso(void)
+{
+	nkvdso->features = XNVDSO_FEATURES;
+	nkvdso->wallclock_offset = nkclock.wallclock_offset;
+}
+
+int cobalt_memdev_init(void)
+{
+	int ret;
+
+	ret = cobalt_umm_init(&cobalt_kernel_ppd.umm,
+			      CONFIG_XENO_OPT_SHARED_HEAPSZ * 1024, NULL);
+	if (ret)
+		return ret;
+
+	cobalt_umm_set_name(&cobalt_kernel_ppd.umm, "shared heap");
+
+	nkvdso = cobalt_umm_alloc(&cobalt_kernel_ppd.umm, sizeof(*nkvdso));
+	if (nkvdso == NULL) {
+		ret = -ENOMEM;
+		goto fail_vdso;
+	}
+
+	init_vdso();
+
+	ret = rtdm_dev_register(umm_devices + UMM_PRIVATE);
+	if (ret)
+		goto fail_private;
+
+	ret = rtdm_dev_register(umm_devices + UMM_SHARED);
+	if (ret)
+		goto fail_shared;
+
+	ret = rtdm_dev_register(&sysmem_device);
+	if (ret)
+		goto fail_sysmem;
+
+	return 0;
+
+fail_sysmem:
+	rtdm_dev_unregister(umm_devices + UMM_SHARED);
+fail_shared:
+	rtdm_dev_unregister(umm_devices + UMM_PRIVATE);
+fail_private:
+	cobalt_umm_free(&cobalt_kernel_ppd.umm, nkvdso);
+fail_vdso:
+	cobalt_umm_destroy(&cobalt_kernel_ppd.umm);
+
+	return ret;
+}
+
+void cobalt_memdev_cleanup(void)
+{
+	rtdm_dev_unregister(&sysmem_device);
+	rtdm_dev_unregister(umm_devices + UMM_SHARED);
+	rtdm_dev_unregister(umm_devices + UMM_PRIVATE);
+	cobalt_umm_free(&cobalt_kernel_ppd.umm, nkvdso);
+	cobalt_umm_destroy(&cobalt_kernel_ppd.umm);
+}
+
+int cobalt_umm_init(struct cobalt_umm *umm, u32 size,
+		    void (*release)(struct cobalt_umm *umm))
+{
+	void *basemem;
+	int ret;
+
+	secondary_mode_only();
+
+	/* We don't support CPUs with VIVT caches and the like. */
+	BUG_ON(xnarch_cache_aliasing());
+
+	size = PAGE_ALIGN(size);
+	basemem = vmalloc_kernel(size, __GFP_ZERO);
+	if (basemem == NULL)
+		return -ENOMEM;
+
+	ret = xnheap_init(&umm->heap, basemem, size);
+	if (ret) {
+		vfree(basemem);
+		return ret;
+	}
+
+	umm->release = release;
+	atomic_set(&umm->refcount, 1);
+	smp_mb();
+
+	return 0;
+}
+
+void cobalt_umm_destroy(struct cobalt_umm *umm)
+{
+	secondary_mode_only();
+
+	if (atomic_dec_and_test(&umm->refcount)) {
+		xnheap_destroy(&umm->heap);
+		vfree(xnheap_get_membase(&umm->heap));
+		if (umm->release)
+			umm->release(umm);
+	}
+}
+++ linux-patched/kernel/xenomai/posix/syscall32.c	2022-03-21 12:58:28.991892842 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/kernel/xenomai/posix/clock.c	1970-01-01 01:00:00.000000000 +0100
+/*
+ * Copyright (C) 2014 Philippe Gerum <rpm@xenomai.org>
+ *
+ * Xenomai is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * Xenomai is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+#include <linux/types.h>
+#include <linux/err.h>
+#include <cobalt/uapi/syscall.h>
+#include <cobalt/kernel/time.h>
+#include <xenomai/rtdm/internal.h>
+#include "internal.h"
+#include "syscall32.h"
+#include "thread.h"
+#include "mutex.h"
+#include "cond.h"
+#include "sem.h"
+#include "sched.h"
+#include "clock.h"
+#include "timer.h"
+#include "timerfd.h"
+#include "signal.h"
+#include "monitor.h"
+#include "event.h"
+#include "mqueue.h"
+#include "io.h"
+#include "../debug.h"
+
+COBALT_SYSCALL32emu(thread_create, init,
+		    (compat_ulong_t pth,
+		     int policy,
+		     const struct compat_sched_param_ex __user *u_param_ex,
+		     int xid,
+		     __u32 __user *u_winoff))
+{
+	struct sched_param_ex param_ex;
+	int ret;
+
+	ret = sys32_get_param_ex(policy, &param_ex, u_param_ex);
+	if (ret)
+		return ret;
+
+	return __cobalt_thread_create(pth, policy, &param_ex, xid, u_winoff);
+}
+
+COBALT_SYSCALL32emu(thread_setschedparam_ex, conforming,
+		    (compat_ulong_t pth,
+		     int policy,
+		     const struct compat_sched_param_ex __user *u_param_ex,
+		     __u32 __user *u_winoff,
+		     int __user *u_promoted))
+{
+	struct sched_param_ex param_ex;
+	int ret;
+
+	ret = sys32_get_param_ex(policy, &param_ex, u_param_ex);
+	if (ret)
+		return ret;
+
+	return cobalt_thread_setschedparam_ex(pth, policy, &param_ex,
+					      u_winoff, u_promoted);
+}
+
+COBALT_SYSCALL32emu(thread_getschedparam_ex, current,
+		    (compat_ulong_t pth,
+		     int __user *u_policy,
+		     struct compat_sched_param_ex __user *u_param))
+{
+	struct sched_param_ex param_ex;
+	int ret, policy;
+
+	ret = cobalt_thread_getschedparam_ex(pth, &policy, &param_ex);
+	if (ret)
+		return ret;
+
+	ret = cobalt_copy_to_user(u_policy, &policy, sizeof(policy));
+
+	return ret ?: sys32_put_param_ex(policy, u_param, &param_ex);
+}
+
+COBALT_SYSCALL32emu(thread_setschedprio, conforming,
+		    (compat_ulong_t pth,
+		     int prio,
+		     __u32 __user *u_winoff,
+		     int __user *u_promoted))
+{
+	return cobalt_thread_setschedprio(pth, prio, u_winoff, u_promoted);
+}
+
+static inline int sys32_fetch_timeout(struct timespec64 *ts,
+				      const void __user *u_ts)
+{
+	return u_ts == NULL ? -EFAULT :
+		sys32_get_timespec(ts, u_ts);
+}
+
+COBALT_SYSCALL32emu(sem_open, lostage,
+		    (compat_uptr_t __user *u_addrp,
+		     const char __user *u_name,
+		     int oflags, mode_t mode, unsigned int value))
+{
+	struct cobalt_sem_shadow __user *usm;
+	compat_uptr_t cusm;
+
+	if (__xn_get_user(cusm, u_addrp))
+		return -EFAULT;
+
+	usm = __cobalt_sem_open(compat_ptr(cusm), u_name, oflags, mode, value);
+	if (IS_ERR(usm))
+		return PTR_ERR(usm);
+
+	return __xn_put_user(ptr_to_compat(usm), u_addrp) ? -EFAULT : 0;
+}
+
+COBALT_SYSCALL32emu(sem_timedwait, primary,
+		    (struct cobalt_sem_shadow __user *u_sem,
+		     const struct old_timespec32 __user *u_ts))
+{
+	int ret = 1;
+	struct timespec64 ts64;
+
+	if (u_ts)
+		ret = sys32_fetch_timeout(&ts64, u_ts);
+
+	return __cobalt_sem_timedwait(u_sem, ret ? NULL : &ts64);
+}
+
+COBALT_SYSCALL32emu(sem_timedwait64, primary,
+		    (struct cobalt_sem_shadow __user *u_sem,
+		     const struct __kernel_timespec __user *u_ts))
+{
+	return __cobalt_sem_timedwait64(u_sem, u_ts);
+}
+
+COBALT_SYSCALL32emu(clock_getres, current,
+		    (clockid_t clock_id,
+		     struct old_timespec32 __user *u_ts))
+{
+	struct timespec64 ts;
+	int ret;
+
+	ret = __cobalt_clock_getres(clock_id, &ts);
+	if (ret)
+		return ret;
+
+	return u_ts ? sys32_put_timespec(u_ts, &ts) : 0;
+}
+
+COBALT_SYSCALL32emu(clock_getres64, current,
+		    (clockid_t clock_id,
+		     struct __kernel_timespec __user *u_ts))
+{
+	return __cobalt_clock_getres64(clock_id, u_ts);
+}
+
+COBALT_SYSCALL32emu(clock_gettime, current,
+		    (clockid_t clock_id,
+		     struct old_timespec32 __user *u_ts))
+{
+	struct timespec64 ts;
+	int ret;
+
+	ret = __cobalt_clock_gettime(clock_id, &ts);
+	if (ret)
+		return ret;
+
+	return sys32_put_timespec(u_ts, &ts);
+}
+
+COBALT_SYSCALL32emu(clock_gettime64, current,
+		    (clockid_t clock_id,
+		     struct __kernel_timespec __user *u_ts))
+{
+	return __cobalt_clock_gettime64(clock_id, u_ts);
+}
+
+COBALT_SYSCALL32emu(clock_settime, current,
+		    (clockid_t clock_id,
+		     const struct old_timespec32 __user *u_ts))
+{
+	struct timespec64 ts;
+	int ret;
+
+	ret = sys32_get_timespec(&ts, u_ts);
+	if (ret)
+		return ret;
+
+	return __cobalt_clock_settime(clock_id, &ts);
+}
+
+COBALT_SYSCALL32emu(clock_settime64, current,
+		    (clockid_t clock_id,
+		     const struct __kernel_timespec __user *u_ts))
+{
+	return __cobalt_clock_settime64(clock_id, u_ts);
+}
+
+COBALT_SYSCALL32emu(clock_adjtime, current,
+		    (clockid_t clock_id, struct old_timex32 __user *u_tx))
+{
+	struct __kernel_timex tx;
+	int ret;
+
+	ret = sys32_get_timex(&tx, u_tx);
+	if (ret)
+		return ret;
+
+	ret = __cobalt_clock_adjtime(clock_id, &tx);
+	if (ret)
+		return ret;
+
+	return sys32_put_timex(u_tx, &tx);
+}
+
+COBALT_SYSCALL32emu(clock_adjtime64, current,
+		    (clockid_t clock_id, struct __kernel_timex __user *u_tx))
+{
+	return __cobalt_clock_adjtime64(clock_id, u_tx);
+}
+
+COBALT_SYSCALL32emu(clock_nanosleep, primary,
+		    (clockid_t clock_id, int flags,
+		     const struct old_timespec32 __user *u_rqt,
+		     struct old_timespec32 __user *u_rmt))
+{
+	struct timespec64 rqt, rmt, *rmtp = NULL;
+	int ret;
+
+	if (u_rmt)
+		rmtp = &rmt;
+
+	ret = sys32_get_timespec(&rqt, u_rqt);
+	if (ret)
+		return ret;
+
+	ret = __cobalt_clock_nanosleep(clock_id, flags, &rqt, rmtp);
+	if (ret == -EINTR && flags == 0 && rmtp)
+		ret = sys32_put_timespec(u_rmt, rmtp);
+
+	return ret;
+}
+
+COBALT_SYSCALL32emu(clock_nanosleep64, nonrestartable,
+		    (clockid_t clock_id, int flags,
+		     const struct __kernel_timespec __user *u_rqt,
+		     struct __kernel_timespec __user *u_rmt))
+{
+	return __cobalt_clock_nanosleep64(clock_id, flags, u_rqt, u_rmt);
+}
+
+COBALT_SYSCALL32emu(mutex_timedlock, primary,
+		    (struct cobalt_mutex_shadow __user *u_mx,
+		     const struct old_timespec32 __user *u_ts))
+{
+	return __cobalt_mutex_timedlock_break(u_mx, u_ts, sys32_fetch_timeout);
+}
+
+COBALT_SYSCALL32emu(mutex_timedlock64, primary,
+		    (struct cobalt_mutex_shadow __user *u_mx,
+		     const struct __kernel_timespec __user *u_ts))
+{
+	return __cobalt_mutex_timedlock64(u_mx, u_ts);
+}
+
+COBALT_SYSCALL32emu(cond_wait_prologue, nonrestartable,
+		    (struct cobalt_cond_shadow __user *u_cnd,
+		     struct cobalt_mutex_shadow __user *u_mx,
+		     int *u_err,
+		     unsigned int timed,
+		     struct old_timespec32 __user *u_ts))
+{
+	return __cobalt_cond_wait_prologue(u_cnd, u_mx, u_err, u_ts,
+					   timed ? sys32_fetch_timeout : NULL);
+}
+
+COBALT_SYSCALL32emu(mq_open, lostage,
+		    (const char __user *u_name, int oflags,
+		     mode_t mode, struct compat_mq_attr __user *u_attr))
+{
+	struct mq_attr _attr, *attr = &_attr;
+	int ret;
+
+	if ((oflags & O_CREAT) && u_attr) {
+		ret = sys32_get_mqattr(&_attr, u_attr);
+		if (ret)
+			return ret;
+	} else
+		attr = NULL;
+
+	return __cobalt_mq_open(u_name, oflags, mode, attr);
+}
+
+COBALT_SYSCALL32emu(mq_getattr, current,
+		    (mqd_t uqd, struct compat_mq_attr __user *u_attr))
+{
+	struct mq_attr attr;
+	int ret;
+
+	ret = __cobalt_mq_getattr(uqd, &attr);
+	if (ret)
+		return ret;
+
+	return sys32_put_mqattr(u_attr, &attr);
+}
+
+COBALT_SYSCALL32emu(mq_timedsend, primary,
+		    (mqd_t uqd, const void __user *u_buf, size_t len,
+		     unsigned int prio,
+		     const struct old_timespec32 __user *u_ts))
+{
+	return __cobalt_mq_timedsend(uqd, u_buf, len, prio,
+				     u_ts, u_ts ? sys32_fetch_timeout : NULL);
+}
+
+COBALT_SYSCALL32emu(mq_timedsend64, primary,
+		    (mqd_t uqd, const void __user *u_buf, size_t len,
+		     unsigned int prio,
+		     const struct __kernel_timespec __user *u_ts))
+{
+	return __cobalt_mq_timedsend64(uqd, u_buf, len, prio, u_ts);
+}
+
+COBALT_SYSCALL32emu(mq_timedreceive, primary,
+		    (mqd_t uqd, void __user *u_buf,
+		     compat_ssize_t __user *u_len,
+		     unsigned int __user *u_prio,
+		     const struct old_timespec32 __user *u_ts))
+{
+	compat_ssize_t clen;
+	ssize_t len;
+	int ret;
+
+	ret = cobalt_copy_from_user(&clen, u_len, sizeof(*u_len));
+	if (ret)
+		return ret;
+
+	len = clen;
+	ret = __cobalt_mq_timedreceive(uqd, u_buf, &len, u_prio,
+				       u_ts, u_ts ? sys32_fetch_timeout : NULL);
+	clen = len;
+
+	return ret ?: cobalt_copy_to_user(u_len, &clen, sizeof(*u_len));
+}
+
+COBALT_SYSCALL32emu(mq_timedreceive64, primary,
+		    (mqd_t uqd, void __user *u_buf, ssize_t __user *u_len,
+		     unsigned int __user *u_prio,
+		     const struct __kernel_timespec __user *u_ts))
+{
+	return __cobalt_mq_timedreceive64(uqd, u_buf, u_len, u_prio, u_ts);
+}
+
+static inline int mq_fetch_timeout(struct timespec64 *ts,
+				   const void __user *u_ts)
+{
+	return u_ts == NULL ? -EFAULT : cobalt_get_u_timespec(ts, u_ts);
+}
+
+COBALT_SYSCALL32emu(mq_notify, primary,
+		    (mqd_t fd, const struct compat_sigevent *__user u_cev))
+{
+	struct sigevent sev;
+	int ret;
+
+	if (u_cev) {
+		ret = sys32_get_sigevent(&sev, u_cev);
+		if (ret)
+			return ret;
+	}
+
+	return __cobalt_mq_notify(fd, u_cev ? &sev : NULL);
+}
+
+COBALT_SYSCALL32emu(sched_weightprio, current,
+		    (int policy,
+		     const struct compat_sched_param_ex __user *u_param))
+{
+	struct sched_param_ex param_ex;
+	int ret;
+
+	ret = sys32_get_param_ex(policy, &param_ex, u_param);
+	if (ret)
+		return ret;
+
+	return __cobalt_sched_weightprio(policy, &param_ex);
+}
+
+static union sched_config *
+sys32_fetch_config(int policy, const void __user *u_config, size_t *len)
+{
+	union compat_sched_config *cbuf;
+	union sched_config *buf;
+	int ret, n;
+
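+	/*
+	 * Compat and native layouts differ in field widths, so pull
+	 * the raw compat image first, then build a native buffer from
+	 * it; for SCHED_TP, *len is recomputed to fit the native
+	 * window array.
+	 */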
+	if (u_config == NULL)
+		return ERR_PTR(-EFAULT);
+
+	if (policy == SCHED_QUOTA && *len < sizeof(cbuf->quota))
+		return ERR_PTR(-EINVAL);
+
+	cbuf = xnmalloc(*len);
+	if (cbuf == NULL)
+		return ERR_PTR(-ENOMEM);
+
+	ret = cobalt_copy_from_user(cbuf, u_config, *len);
+	if (ret) {
+		buf = ERR_PTR(ret);
+		goto out;
+	}
+
+	switch (policy) {
+	case SCHED_TP:
+		*len = sched_tp_confsz(cbuf->tp.nr_windows);
+		break;
+	case SCHED_QUOTA:
+		break;
+	default:
+		buf = ERR_PTR(-EINVAL);
+		goto out;
+	}
+
+	buf = xnmalloc(*len);
+	if (buf == NULL) {
+		buf = ERR_PTR(-ENOMEM);
+		goto out;
+	}
+
+	if (policy == SCHED_QUOTA)
+		memcpy(&buf->quota, &cbuf->quota, sizeof(cbuf->quota));
+	else {
+		buf->tp.op = cbuf->tp.op;
+		buf->tp.nr_windows = cbuf->tp.nr_windows;
+		for (n = 0; n < buf->tp.nr_windows; n++) {
+			buf->tp.windows[n].ptid = cbuf->tp.windows[n].ptid;
+			buf->tp.windows[n].offset.tv_sec = cbuf->tp.windows[n].offset.tv_sec;
+			buf->tp.windows[n].offset.tv_nsec = cbuf->tp.windows[n].offset.tv_nsec;
+			buf->tp.windows[n].duration.tv_sec = cbuf->tp.windows[n].duration.tv_sec;
+			buf->tp.windows[n].duration.tv_nsec = cbuf->tp.windows[n].duration.tv_nsec;
+		}
+	}
+out:
+	xnfree(cbuf);
+
+	return buf;
+}
+
+static int sys32_ack_config(int policy, const union sched_config *config,
+			    void __user *u_config)
+{
+	union compat_sched_config __user *u_p = u_config;
+
+	if (policy != SCHED_QUOTA)
+		return 0;
+
+	return u_config == NULL ? -EFAULT :
+		cobalt_copy_to_user(&u_p->quota.info, &config->quota.info,
+				       sizeof(u_p->quota.info));
+}
+
+static ssize_t sys32_put_config(int policy,
+				void __user *u_config, size_t u_len,
+				const union sched_config *config, size_t len)
+{
+	union compat_sched_config __user *u_p = u_config;
+	int n, ret;
+
+	if (u_config == NULL)
+		return -EFAULT;
+
+	if (policy == SCHED_QUOTA) {
+		if (u_len < sizeof(u_p->quota))
+			return -EINVAL;
+		return cobalt_copy_to_user(&u_p->quota.info, &config->quota.info,
+					      sizeof(u_p->quota.info)) ?:
+			sizeof(u_p->quota.info);
+	}
+
+	/* SCHED_TP */
+
+	if (u_len < compat_sched_tp_confsz(config->tp.nr_windows))
+		return -ENOSPC;
+
+	__xn_put_user(config->tp.op, &u_p->tp.op);
+	__xn_put_user(config->tp.nr_windows, &u_p->tp.nr_windows);
+
+	for (n = 0, ret = 0; n < config->tp.nr_windows; n++) {
+		ret |= __xn_put_user(config->tp.windows[n].ptid,
+				     &u_p->tp.windows[n].ptid);
+		ret |= __xn_put_user(config->tp.windows[n].offset.tv_sec,
+				     &u_p->tp.windows[n].offset.tv_sec);
+		ret |= __xn_put_user(config->tp.windows[n].offset.tv_nsec,
+				     &u_p->tp.windows[n].offset.tv_nsec);
+		ret |= __xn_put_user(config->tp.windows[n].duration.tv_sec,
+				     &u_p->tp.windows[n].duration.tv_sec);
+		ret |= __xn_put_user(config->tp.windows[n].duration.tv_nsec,
+				     &u_p->tp.windows[n].duration.tv_nsec);
+	}
+
+	return ret ?: u_len;
+}
+
+COBALT_SYSCALL32emu(sched_setconfig_np, conforming,
+		    (int cpu, int policy,
+		     union compat_sched_config __user *u_config,
+		     size_t len))
+{
+	return __cobalt_sched_setconfig_np(cpu, policy, u_config, len,
+					   sys32_fetch_config, sys32_ack_config);
+}
+
+COBALT_SYSCALL32emu(sched_getconfig_np, conforming,
+		    (int cpu, int policy,
+		     union compat_sched_config __user *u_config,
+		     size_t len))
+{
+	return __cobalt_sched_getconfig_np(cpu, policy, u_config, len,
+					   sys32_fetch_config, sys32_put_config);
+}
+
+COBALT_SYSCALL32emu(sched_setscheduler_ex, conforming,
+		    (compat_pid_t pid,
+		     int policy,
+		     const struct compat_sched_param_ex __user *u_param_ex,
+		     __u32 __user *u_winoff,
+		     int __user *u_promoted))
+{
+	struct sched_param_ex param_ex;
+	int ret;
+
+	ret = sys32_get_param_ex(policy, &param_ex, u_param_ex);
+	if (ret)
+		return ret;
+
+	return cobalt_sched_setscheduler_ex(pid, policy, &param_ex,
+					    u_winoff, u_promoted);
+}
+
+COBALT_SYSCALL32emu(sched_getscheduler_ex, current,
+		    (compat_pid_t pid,
+		     int __user *u_policy,
+		     struct compat_sched_param_ex __user *u_param))
+{
+	struct sched_param_ex param_ex;
+	int ret, policy;
+
+	ret = cobalt_sched_getscheduler_ex(pid, &policy, &param_ex);
+	if (ret)
+		return ret;
+
+	ret = cobalt_copy_to_user(u_policy, &policy, sizeof(policy));
+
+	return ret ?: sys32_put_param_ex(policy, u_param, &param_ex);
+}
+
+COBALT_SYSCALL32emu(timer_create, current,
+		    (clockid_t clock,
+		     const struct compat_sigevent __user *u_sev,
+		     timer_t __user *u_tm))
+{
+	struct sigevent sev, *evp = NULL;
+	int ret;
+
+	if (u_sev) {
+		evp = &sev;
+		ret = sys32_get_sigevent(&sev, u_sev);
+		if (ret)
+			return ret;
+	}
+
+	return __cobalt_timer_create(clock, evp, u_tm);
+}
+
+COBALT_SYSCALL32emu(timer_settime, primary,
+		    (timer_t tm, int flags,
+		     const struct old_itimerspec32 __user *u_newval,
+		     struct old_itimerspec32 __user *u_oldval))
+{
+	struct itimerspec64 newv, oldv, *oldvp = &oldv;
+	int ret;
+
+	if (u_oldval == NULL)
+		oldvp = NULL;
+
+	ret = sys32_get_itimerspec(&newv, u_newval);
+	if (ret)
+		return ret;
+
+	ret = __cobalt_timer_settime(tm, flags, &newv, oldvp);
+	if (ret)
+		return ret;
+
+	if (oldvp) {
+		ret = sys32_put_itimerspec(u_oldval, oldvp);
+		if (ret)
+			__cobalt_timer_settime(tm, flags, oldvp, NULL);
+	}
+
+	return ret;
+}
+
+COBALT_SYSCALL32emu(timer_gettime, current,
+		    (timer_t tm, struct old_itimerspec32 __user *u_val))
+{
+	struct itimerspec64 val;
+	int ret;
+
+	ret = __cobalt_timer_gettime(tm, &val);
+
+	return ret ?: sys32_put_itimerspec(u_val, &val);
+}
+
+COBALT_SYSCALL32emu(timerfd_settime, primary,
+		    (int fd, int flags,
+		     const struct old_itimerspec32 __user *new_value,
+		     struct old_itimerspec32 __user *old_value))
+{
+	struct itimerspec64 ovalue, value;
+	int ret;
+
+	ret = sys32_get_itimerspec(&value, new_value);
+	if (ret)
+		return ret;
+
+	ret = __cobalt_timerfd_settime(fd, flags, &value, &ovalue);
+	if (ret)
+		return ret;
+
+	if (old_value) {
+		ret = sys32_put_itimerspec(old_value, &ovalue);
+		value.it_value.tv_sec = 0;
+		value.it_value.tv_nsec = 0;
+		__cobalt_timerfd_settime(fd, flags, &value, NULL);
+	}
+
+	return ret;
+}
+
+COBALT_SYSCALL32emu(timerfd_gettime, current,
+		    (int fd, struct old_itimerspec32 __user *curr_value))
+{
+	struct itimerspec64 value;
+	int ret;
+
+	ret = __cobalt_timerfd_gettime(fd, &value);
+
+	return ret ?: sys32_put_itimerspec(curr_value, &value);
+}
+
+COBALT_SYSCALL32emu(sigwait, primary,
+		    (const compat_sigset_t __user *u_set,
+		     int __user *u_sig))
+{
+	sigset_t set;
+	int ret, sig;
+
+	ret = sys32_get_sigset(&set, u_set);
+	if (ret)
+		return ret;
+
+	sig = __cobalt_sigwait(&set);
+	if (sig < 0)
+		return sig;
+
+	return cobalt_copy_to_user(u_sig, &sig, sizeof(*u_sig));
+}
+
+COBALT_SYSCALL32emu(sigtimedwait, nonrestartable,
+		    (const compat_sigset_t __user *u_set,
+		     struct compat_siginfo __user *u_si,
+		     const struct old_timespec32 __user *u_timeout))
+{
+	struct timespec64 timeout;
+	sigset_t set;
+	int ret;
+
+	ret = sys32_get_sigset(&set, u_set);
+	if (ret)
+		return ret;
+
+	ret = sys32_get_timespec(&timeout, u_timeout);
+	if (ret)
+		return ret;
+
+	return __cobalt_sigtimedwait(&set, &timeout, u_si, true);
+}
+
+COBALT_SYSCALL32emu(sigtimedwait64, nonrestartable,
+		    (const compat_sigset_t __user *u_set,
+		     struct compat_siginfo __user *u_si,
+		     const struct __kernel_timespec __user *u_timeout))
+{
+	struct timespec64 timeout;
+	sigset_t set;
+	int ret;
+
+	ret = sys32_get_sigset(&set, u_set);
+	if (ret)
+		return ret;
+
+	ret = cobalt_get_timespec64(&timeout, u_timeout);
+	if (ret)
+		return ret;
+
+	return __cobalt_sigtimedwait(&set, &timeout, u_si, true);
+}
+
+COBALT_SYSCALL32emu(sigwaitinfo, nonrestartable,
+		    (const compat_sigset_t __user *u_set,
+		     struct compat_siginfo __user *u_si))
+{
+	sigset_t set;
+	int ret;
+
+	ret = sys32_get_sigset(&set, u_set);
+	if (ret)
+		return ret;
+
+	return __cobalt_sigwaitinfo(&set, u_si, true);
+}
+
+COBALT_SYSCALL32emu(sigpending, primary, (compat_old_sigset_t __user *u_set))
+{
+	struct cobalt_thread *curr = cobalt_current_thread();
+
+	return sys32_put_sigset((compat_sigset_t *)u_set, &curr->sigpending);
+}
+
+COBALT_SYSCALL32emu(sigqueue, conforming,
+		    (pid_t pid, int sig,
+		     const union compat_sigval __user *u_value))
+{
+	union sigval val;
+	int ret;
+
+	ret = sys32_get_sigval(&val, u_value);
+
+	return ret ?: __cobalt_sigqueue(pid, sig, &val);
+}
+
+COBALT_SYSCALL32emu(monitor_wait, nonrestartable,
+		    (struct cobalt_monitor_shadow __user *u_mon,
+		     int event, const struct old_timespec32 __user *u_ts,
+		     int __user *u_ret))
+{
+	struct timespec64 ts, *tsp = NULL;
+	int ret;
+
+	if (u_ts) {
+		tsp = &ts;
+		ret = sys32_get_timespec(&ts, u_ts);
+		if (ret)
+			return ret;
+	}
+
+	return __cobalt_monitor_wait(u_mon, event, tsp, u_ret);
+}
+
+COBALT_SYSCALL32emu(monitor_wait64, nonrestartable,
+		    (struct cobalt_monitor_shadow __user *u_mon, int event,
+		     const struct __kernel_timespec __user *u_ts,
+		     int __user *u_ret))
+{
+	return __cobalt_monitor_wait64(u_mon, event, u_ts, u_ret);
+}
+
+COBALT_SYSCALL32emu(event_wait, primary,
+		    (struct cobalt_event_shadow __user *u_event,
+		     unsigned int bits,
+		     unsigned int __user *u_bits_r,
+		     int mode, const struct old_timespec32 __user *u_ts))
+{
+	struct timespec64 ts, *tsp = NULL;
+	int ret;
+
+	if (u_ts) {
+		tsp = &ts;
+		ret = sys32_get_timespec(&ts, u_ts);
+		if (ret)
+			return ret;
+	}
+
+	return __cobalt_event_wait(u_event, bits, u_bits_r, mode, tsp);
+}
+
+COBALT_SYSCALL32emu(event_wait64, primary,
+		    (struct cobalt_event_shadow __user *u_event,
+		     unsigned int bits,
+		     unsigned int __user *u_bits_r,
+		     int mode, const struct __kernel_timespec __user *u_ts))
+{
+	return __cobalt_event_wait64(u_event, bits, u_bits_r, mode, u_ts);
+}
+
+COBALT_SYSCALL32emu(select, primary,
+		    (int nfds,
+		     compat_fd_set __user *u_rfds,
+		     compat_fd_set __user *u_wfds,
+		     compat_fd_set __user *u_xfds,
+		     struct old_timeval32 __user *u_tv))
+{
+	return __cobalt_select(nfds, u_rfds, u_wfds, u_xfds, u_tv, true);
+}
+
+COBALT_SYSCALL32emu(recvmsg, handover,
+		    (int fd, struct compat_msghdr __user *umsg,
+		     int flags))
+{
+	struct user_msghdr m;
+	ssize_t ret;
+
+	ret = sys32_get_msghdr(&m, umsg);
+	if (ret)
+		return ret;
+
+	ret = rtdm_fd_recvmsg(fd, &m, flags);
+	if (ret < 0)
+		return ret;
+
+	return sys32_put_msghdr(umsg, &m) ?: ret;
+}
+
+static int get_timespec32(struct timespec64 *ts,
+			  const void __user *u_ts)
+{
+	return sys32_get_timespec(ts, u_ts);
+}
+
+static int get_mmsg32(struct mmsghdr *mmsg, void __user *u_mmsg)
+{
+	return sys32_get_mmsghdr(mmsg, u_mmsg);
+}
+
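+/*
+ * Illustrative note: write back one mmsghdr to the slot currently
+ * pointed at by the caller's cursor, then advance that cursor to the
+ * next compat slot; the cursor is passed by reference so that the
+ * recvmmsg handler below can walk the user-supplied vector.
+ */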
+static int put_mmsg32(void __user **u_mmsg_p, const struct mmsghdr *mmsg)
+{
+	struct compat_mmsghdr __user **p = (struct compat_mmsghdr **)u_mmsg_p,
+		*q __user = (*p)++;
+
+	return sys32_put_mmsghdr(q, mmsg);
+}
+
+COBALT_SYSCALL32emu(recvmmsg, primary,
+	       (int ufd, struct compat_mmsghdr __user *u_msgvec, unsigned int vlen,
+		unsigned int flags, struct old_timespec32 *u_timeout))
+{
+	return __rtdm_fd_recvmmsg(ufd, u_msgvec, vlen, flags, u_timeout,
+				  get_mmsg32, put_mmsg32,
+				  get_timespec32);
+}
+
+COBALT_SYSCALL32emu(recvmmsg64, primary,
+		    (int ufd, struct compat_mmsghdr __user *u_msgvec,
+		     unsigned int vlen, unsigned int flags,
+		     struct __kernel_timespec *u_timeout))
+{
+	return __rtdm_fd_recvmmsg64(ufd, u_msgvec, vlen, flags, u_timeout,
+				    get_mmsg32, put_mmsg32);
+}
+
+COBALT_SYSCALL32emu(sendmsg, handover,
+		    (int fd, struct compat_msghdr __user *umsg, int flags))
+{
+	struct user_msghdr m;
+	int ret;
+
+	ret = sys32_get_msghdr(&m, umsg);
+
+	return ret ?: rtdm_fd_sendmsg(fd, &m, flags);
+}
+
+static int put_mmsglen32(void __user **u_mmsg_p, const struct mmsghdr *mmsg)
+{
+	struct compat_mmsghdr __user **p = (struct compat_mmsghdr **)u_mmsg_p,
+		*q __user = (*p)++;
+
+	return __xn_put_user(mmsg->msg_len, &q->msg_len);
+}
+
+COBALT_SYSCALL32emu(sendmmsg, primary,
+		    (int fd, struct compat_mmsghdr __user *u_msgvec, unsigned int vlen,
+		     unsigned int flags))
+{
+	return __rtdm_fd_sendmmsg(fd, u_msgvec, vlen, flags,
+				  get_mmsg32, put_mmsglen32);
+}
+
+COBALT_SYSCALL32emu(mmap, lostage,
+		    (int fd, struct compat_rtdm_mmap_request __user *u_crma,
+		     compat_uptr_t __user *u_caddrp))
+{
+	struct _rtdm_mmap_request rma;
+	compat_uptr_t u_caddr;
+	void *u_addr = NULL;
+	int ret;
+
+	if (u_crma == NULL ||
+	    !access_rok(u_crma, sizeof(*u_crma)) ||
+	    __xn_get_user(rma.length, &u_crma->length) ||
+	    __xn_get_user(rma.offset, &u_crma->offset) ||
+	    __xn_get_user(rma.prot, &u_crma->prot) ||
+	    __xn_get_user(rma.flags, &u_crma->flags))
+		return -EFAULT;
+
+	ret = rtdm_fd_mmap(fd, &rma, &u_addr);
+	if (ret)
+		return ret;
+
+	u_caddr = ptr_to_compat(u_addr);
+
+	return cobalt_copy_to_user(u_caddrp, &u_caddr, sizeof(u_caddr));
+}
+
+COBALT_SYSCALL32emu(backtrace, current,
+		    (int nr, compat_ulong_t __user *u_backtrace,
+		     int reason))
+{
+	compat_ulong_t cbacktrace[SIGSHADOW_BACKTRACE_DEPTH];
+	unsigned long backtrace[SIGSHADOW_BACKTRACE_DEPTH];
+	int ret, n;
+
+	if (nr <= 0)
+		return 0;
+
+	if (nr > SIGSHADOW_BACKTRACE_DEPTH)
+		nr = SIGSHADOW_BACKTRACE_DEPTH;
+
+	ret = cobalt_copy_from_user(cbacktrace, u_backtrace,
+				       nr * sizeof(compat_ulong_t));
+	if (ret)
+		return ret;
+
+	for (n = 0; n < nr; n++)
+		backtrace[n] = cbacktrace[n];
+
+	xndebug_trace_relax(nr, backtrace, reason);
+
+	return 0;
+}
+++ linux-patched/kernel/xenomai/posix/clock.c	2022-03-21 12:58:28.987892881 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/kernel/xenomai/posix/monitor.c	1970-01-01 01:00:00.000000000 +0100
+/*
+ * Written by Gilles Chanteperdrix <gilles.chanteperdrix@xenomai.org>.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+#include <linux/clocksource.h>
+#include <linux/bitmap.h>
+#include <cobalt/kernel/clock.h>
+#include "internal.h"
+#include "thread.h"
+#include "clock.h"
+#include <trace/events/cobalt-posix.h>
+#include <cobalt/kernel/time.h>
+
+static struct xnclock *external_clocks[COBALT_MAX_EXTCLOCKS];
+
+DECLARE_BITMAP(cobalt_clock_extids, COBALT_MAX_EXTCLOCKS);
+
+#define do_ext_clock(__clock_id, __handler, __ret, __args...)	\
+({								\
+	struct xnclock *__clock;				\
+	int __val = 0, __nr;					\
+	spl_t __s;						\
+								\
+	if (!__COBALT_CLOCK_EXT_P(__clock_id))			\
+		__val = -EINVAL;				\
+	else {							\
+		__nr = __COBALT_CLOCK_EXT_INDEX(__clock_id);	\
+		xnlock_get_irqsave(&nklock, __s);		\
+		if (!test_bit(__nr, cobalt_clock_extids)) {	\
+			xnlock_put_irqrestore(&nklock, __s);	\
+			__val = -EINVAL;			\
+		} else {					\
+			__clock = external_clocks[__nr];	\
+			(__ret) = xnclock_ ## __handler(__clock, ##__args); \
+			xnlock_put_irqrestore(&nklock, __s);	\
+		}						\
+	}							\
+	__val;							\
+})
+
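+/*
+ * A minimal usage sketch of do_ext_clock(), assuming an external
+ * clock registered through cobalt_clock_register(): the macro
+ * evaluates to -EINVAL when @__clock_id does not name a live external
+ * clock, otherwise it stores the handler result into @__ret and
+ * evaluates to 0.
+ *
+ *	xnticks_t resolution;
+ *	int err = do_ext_clock(clock_id, get_resolution, resolution);
+ *	if (err)
+ *		return err;	// invalid or unregistered clock id
+ *	// resolution now holds xnclock_get_resolution(clock)
+ */
+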
+int __cobalt_clock_getres(clockid_t clock_id, struct timespec64 *ts)
+{
+	xnticks_t ns;
+	int ret;
+
+	switch (clock_id) {
+	case CLOCK_REALTIME:
+	case CLOCK_MONOTONIC:
+	case CLOCK_MONOTONIC_RAW:
+		ns2ts(ts, 1);
+		break;
+	default:
+		ret = do_ext_clock(clock_id, get_resolution, ns);
+		if (ret)
+			return ret;
+		ns2ts(ts, ns);
+	}
+
+	trace_cobalt_clock_getres(clock_id, ts);
+
+	return 0;
+}
+
+COBALT_SYSCALL(clock_getres, current,
+	       (clockid_t clock_id, struct __user_old_timespec __user *u_ts))
+{
+	struct timespec64 ts;
+	int ret;
+
+	ret = __cobalt_clock_getres(clock_id, &ts);
+	if (ret)
+		return ret;
+
+	if (u_ts && cobalt_put_u_timespec(u_ts, &ts))
+		return -EFAULT;
+
+	trace_cobalt_clock_getres(clock_id, &ts);
+
+	return 0;
+}
+
+int __cobalt_clock_getres64(clockid_t clock_id,
+			struct __kernel_timespec __user *u_ts)
+{
+	struct timespec64 ts;
+	int ret;
+
+	ret = __cobalt_clock_getres(clock_id, &ts);
+	if (ret)
+		return ret;
+
+	if (cobalt_put_timespec64(&ts, u_ts))
+		return -EFAULT;
+
+	trace_cobalt_clock_getres(clock_id, &ts);
+
+	return 0;
+}
+
+COBALT_SYSCALL(clock_getres64, current,
+	       (clockid_t clock_id, struct __kernel_timespec __user *u_ts))
+{
+	return __cobalt_clock_getres64(clock_id, u_ts);
+}
+
+int __cobalt_clock_gettime(clockid_t clock_id, struct timespec64 *ts)
+{
+	xnticks_t ns;
+	int ret;
+
+	switch (clock_id) {
+	case CLOCK_REALTIME:
+		ns2ts(ts, xnclock_read_realtime(&nkclock));
+		break;
+	case CLOCK_MONOTONIC:
+	case CLOCK_MONOTONIC_RAW:
+		ns2ts(ts, xnclock_read_monotonic(&nkclock));
+		break;
+	case CLOCK_HOST_REALTIME:
+		if (pipeline_get_host_time(ts) != 0)
+			return -EINVAL;
+		break;
+	default:
+		ret = do_ext_clock(clock_id, read_monotonic, ns);
+		if (ret)
+			return ret;
+		ns2ts(ts, ns);
+	}
+
+	trace_cobalt_clock_gettime(clock_id, ts);
+
+	return 0;
+}
+
+COBALT_SYSCALL(clock_gettime, current,
+	       (clockid_t clock_id, struct __user_old_timespec __user *u_ts))
+{
+	struct timespec64 ts;
+	int ret;
+
+	ret = __cobalt_clock_gettime(clock_id, &ts);
+	if (ret)
+		return ret;
+
+	if (cobalt_put_u_timespec(u_ts, &ts))
+		return -EFAULT;
+
+	return 0;
+}
+
+int __cobalt_clock_gettime64(clockid_t clock_id,
+			struct __kernel_timespec __user *u_ts)
+{
+	struct timespec64 ts;
+	int ret;
+
+	ret = __cobalt_clock_gettime(clock_id, &ts);
+	if (ret)
+		return ret;
+
+	if (cobalt_put_timespec64(&ts, u_ts))
+		return -EFAULT;
+
+	return 0;
+}
+
+COBALT_SYSCALL(clock_gettime64, current,
+	       (clockid_t clock_id, struct __kernel_timespec __user *u_ts))
+{
+	return __cobalt_clock_gettime64(clock_id, u_ts);
+}
+
+int __cobalt_clock_settime(clockid_t clock_id, const struct timespec64 *ts)
+{
+	int _ret, ret = 0;
+
+	if ((unsigned long)ts->tv_nsec >= ONE_BILLION)
+		return -EINVAL;
+
+	switch (clock_id) {
+	case CLOCK_REALTIME:
+		ret = pipeline_set_wallclock(ts2ns(ts));
+		break;
+	default:
+		_ret = do_ext_clock(clock_id, set_time, ret, ts);
+		if (_ret || ret)
+			return _ret ?: ret;
+	}
+
+	trace_cobalt_clock_settime(clock_id, ts);
+
+	return ret;
+}
+
+int __cobalt_clock_adjtime(clockid_t clock_id, struct __kernel_timex *tx)
+{
+	int _ret, ret = 0;
+
+	switch (clock_id) {
+	case CLOCK_REALTIME:
+	case CLOCK_MONOTONIC:
+	case CLOCK_MONOTONIC_RAW:
+	case CLOCK_HOST_REALTIME:
+		return -EOPNOTSUPP;
+	default:
+		_ret = do_ext_clock(clock_id, adjust_time, ret, tx);
+		if (_ret || ret)
+			return _ret ?: ret;
+	}
+
+	trace_cobalt_clock_adjtime(clock_id, tx);
+
+	return 0;
+}
+
+COBALT_SYSCALL(clock_settime, current,
+	       (clockid_t clock_id, const struct __user_old_timespec __user *u_ts))
+{
+	struct timespec64 ts;
+
+	if (cobalt_get_u_timespec(&ts, u_ts))
+		return -EFAULT;
+
+	return __cobalt_clock_settime(clock_id, &ts);
+}
+
+int __cobalt_clock_settime64(clockid_t clock_id,
+			const struct __kernel_timespec __user *u_ts)
+{
+	struct timespec64 ts64;
+
+	if (cobalt_get_timespec64(&ts64, u_ts))
+		return -EFAULT;
+
+	return __cobalt_clock_settime(clock_id, &ts64);
+}
+
+COBALT_SYSCALL(clock_settime64, current,
+	       (clockid_t clock_id, const struct __kernel_timespec __user *u_ts))
+{
+	return __cobalt_clock_settime64(clock_id, u_ts);
+}
+
+COBALT_SYSCALL(clock_adjtime, current,
+	       (clockid_t clock_id, struct __user_old_timex __user *u_tx))
+{
+	struct __kernel_timex tx;
+	int ret;
+
+	if (cobalt_copy_from_user(&tx, u_tx, sizeof(tx)))
+		return -EFAULT;
+
+	ret = __cobalt_clock_adjtime(clock_id, &tx);
+	if (ret)
+		return ret;
+
+	return cobalt_copy_to_user(u_tx, &tx, sizeof(tx));
+}
+
+int __cobalt_clock_adjtime64(clockid_t clock_id,
+			struct __kernel_timex __user *u_tx)
+{
+	struct __kernel_timex tx;
+	int ret;
+
+	if (cobalt_copy_from_user(&tx, u_tx, sizeof(tx)))
+		return -EFAULT;
+
+	ret = __cobalt_clock_adjtime(clock_id, &tx);
+	if (ret)
+		return ret;
+
+	return cobalt_copy_to_user(u_tx, &tx, sizeof(tx));
+}
+
+COBALT_SYSCALL(clock_adjtime64, current,
+	       (clockid_t clock_id, struct __kernel_timex __user *u_tx))
+{
+	return __cobalt_clock_adjtime64(clock_id, u_tx);
+}
+
+int __cobalt_clock_nanosleep(clockid_t clock_id, int flags,
+			     const struct timespec64 *rqt,
+			     struct timespec64 *rmt)
+{
+	struct restart_block *restart;
+	struct xnthread *cur;
+	xnsticks_t timeout, rem;
+	spl_t s;
+
+	trace_cobalt_clock_nanosleep(clock_id, flags, rqt);
+
+	if (clock_id != CLOCK_MONOTONIC &&
+	    clock_id != CLOCK_MONOTONIC_RAW &&
+	    clock_id != CLOCK_REALTIME)
+		return -EOPNOTSUPP;
+
+	if (rqt->tv_sec < 0)
+		return -EINVAL;
+
+	if ((unsigned long)rqt->tv_nsec >= ONE_BILLION)
+		return -EINVAL;
+
+	if (flags & ~TIMER_ABSTIME)
+		return -EINVAL;
+
+	cur = xnthread_current();
+
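+	/*
+	 * Sketch of the restart protocol implemented below: if this
+	 * thread was interrupted by a linux signal while sleeping here
+	 * and the syscall is being restarted, the remaining timeout
+	 * saved in the restart block is used instead of the
+	 * user-supplied request.
+	 */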
+	if (xnthread_test_localinfo(cur, XNSYSRST)) {
+		xnthread_clear_localinfo(cur, XNSYSRST);
+
+		restart = cobalt_get_restart_block(current);
+
+		if (restart->fn != cobalt_restart_syscall_placeholder) {
+			if (rmt) {
+				xnlock_get_irqsave(&nklock, s);
+				rem = xntimer_get_timeout_stopped(&cur->rtimer);
+				xnlock_put_irqrestore(&nklock, s);
+				ns2ts(rmt, rem > 1 ? rem : 0);
+			}
+			return -EINTR;
+		}
+
+		timeout = restart->nanosleep.expires;
+	} else
+		timeout = ts2ns(rqt);
+
+	xnlock_get_irqsave(&nklock, s);
+
+	xnthread_suspend(cur, XNDELAY, timeout + 1,
+			 clock_flag(flags, clock_id), NULL);
+
+	if (xnthread_test_info(cur, XNBREAK)) {
+		if (signal_pending(current)) {
+			restart = cobalt_get_restart_block(current);
+			restart->nanosleep.expires =
+				(flags & TIMER_ABSTIME) ? timeout :
+				    xntimer_get_timeout_stopped(&cur->rtimer);
+			xnlock_put_irqrestore(&nklock, s);
+			restart->fn = cobalt_restart_syscall_placeholder;
+
+			xnthread_set_localinfo(cur, XNSYSRST);
+
+			return -ERESTARTSYS;
+		}
+
+		if (flags == 0 && rmt) {
+			rem = xntimer_get_timeout_stopped(&cur->rtimer);
+			xnlock_put_irqrestore(&nklock, s);
+			ns2ts(rmt, rem > 1 ? rem : 0);
+		} else
+			xnlock_put_irqrestore(&nklock, s);
+
+		return -EINTR;
+	}
+
+	xnlock_put_irqrestore(&nklock, s);
+
+	return 0;
+}
+
+COBALT_SYSCALL(clock_nanosleep, primary,
+	       (clockid_t clock_id, int flags,
+		const struct __user_old_timespec __user *u_rqt,
+		struct __user_old_timespec __user *u_rmt))
+{
+	struct timespec64 rqt, rmt, *rmtp = NULL;
+	int ret;
+
+	if (u_rmt)
+		rmtp = &rmt;
+
+	if (cobalt_get_u_timespec(&rqt, u_rqt))
+		return -EFAULT;
+
+	ret = __cobalt_clock_nanosleep(clock_id, flags, &rqt, rmtp);
+	if (ret == -EINTR && flags == 0 && rmtp) {
+		if (cobalt_put_u_timespec(u_rmt, rmtp))
+			return -EFAULT;
+	}
+
+	return ret;
+}
+
+int __cobalt_clock_nanosleep64(clockid_t clock_id, int flags,
+		const struct __kernel_timespec __user *u_rqt,
+		struct __kernel_timespec __user *u_rmt)
+{
+	struct timespec64 rqt, rmt, *rmtp = NULL;
+	int ret;
+
+	if (u_rmt)
+		rmtp = &rmt;
+
+	if (cobalt_get_timespec64(&rqt, u_rqt))
+		return -EFAULT;
+
+	ret = __cobalt_clock_nanosleep(clock_id, flags, &rqt, rmtp);
+	if (ret == -EINTR && flags == 0 && rmtp) {
+		if (cobalt_put_timespec64(rmtp, u_rmt))
+			return -EFAULT;
+	}
+
+	return ret;
+}
+
+COBALT_SYSCALL(clock_nanosleep64, primary,
+	       (clockid_t clock_id, int flags,
+		const struct __kernel_timespec __user *u_rqt,
+		struct __kernel_timespec __user *u_rmt))
+{
+	return __cobalt_clock_nanosleep64(clock_id, flags, u_rqt, u_rmt);
+}
+
+int cobalt_clock_register(struct xnclock *clock, const cpumask_t *affinity,
+			  clockid_t *clk_id)
+{
+	int ret, nr;
+	spl_t s;
+
+	xnlock_get_irqsave(&nklock, s);
+
+	nr = find_first_zero_bit(cobalt_clock_extids, COBALT_MAX_EXTCLOCKS);
+	if (nr >= COBALT_MAX_EXTCLOCKS) {
+		xnlock_put_irqrestore(&nklock, s);
+		return -EAGAIN;
+	}
+
+	/*
+	 * CAUTION: a bit raised in cobalt_clock_extids means that the
+	 * corresponding entry in external_clocks[] is valid. The
+	 * converse assumption is NOT true.
+	 */
+	__set_bit(nr, cobalt_clock_extids);
+	external_clocks[nr] = clock;
+
+	xnlock_put_irqrestore(&nklock, s);
+
+	ret = xnclock_register(clock, affinity);
+	if (ret)
+		return ret;
+
+	clock->id = nr;
+	*clk_id = __COBALT_CLOCK_EXT(clock->id);
+
+	trace_cobalt_clock_register(clock->name, *clk_id);
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(cobalt_clock_register);
+
+void cobalt_clock_deregister(struct xnclock *clock)
+{
+	trace_cobalt_clock_deregister(clock->name, clock->id);
+	clear_bit(clock->id, cobalt_clock_extids);
+	smp_mb__after_atomic();
+	external_clocks[clock->id] = NULL;
+	xnclock_deregister(clock);
+}
+EXPORT_SYMBOL_GPL(cobalt_clock_deregister);
+
+struct xnclock *cobalt_clock_find(clockid_t clock_id)
+{
+	struct xnclock *clock = ERR_PTR(-EINVAL);
+	spl_t s;
+	int nr;
+
+	if (clock_id == CLOCK_MONOTONIC ||
+	    clock_id == CLOCK_MONOTONIC_RAW ||
+	    clock_id == CLOCK_REALTIME)
+		return &nkclock;
+
+	if (__COBALT_CLOCK_EXT_P(clock_id)) {
+		nr = __COBALT_CLOCK_EXT_INDEX(clock_id);
+		xnlock_get_irqsave(&nklock, s);
+		if (test_bit(nr, cobalt_clock_extids))
+			clock = external_clocks[nr];
+		xnlock_put_irqrestore(&nklock, s);
+	}
+
+	return clock;
+}
+EXPORT_SYMBOL_GPL(cobalt_clock_find);
+++ linux-patched/kernel/xenomai/posix/monitor.c	2022-03-21 12:58:28.984892911 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/kernel/xenomai/posix/syscall32.h	1970-01-01 01:00:00.000000000 +0100
+/*
+ * Copyright (C) 2011 Philippe Gerum <rpm@xenomai.org>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+#include "internal.h"
+#include "thread.h"
+#include "clock.h"
+#include "monitor.h"
+#include <trace/events/cobalt-posix.h>
+#include <cobalt/kernel/time.h>
+
+/*
+ * The Cobalt monitor is a double-wait condition object, serializing
+ * accesses through a gate. It behaves like a mutex + two condition
+ * variables combo with extended signaling logic. Folding several
+ * conditions and the serialization support into a single object
+ * performs better on low-end hardware caches and allows for specific
+ * optimizations, compared to using separate general-purpose mutex and
+ * condvars. This object is used by the Copperplate interface
+ * internally when it runs over the Cobalt core.
+ *
+ * Threads can wait for some resource(s) to be granted (consumer
+ * side), or wait for the available resource(s) to drain (producer
+ * side).  Therefore, signals are thread-directed for the grant side,
+ * and monitor-directed for the drain side.
+ *
+ * Typically, a consumer would wait for the GRANT condition to be
+ * satisfied, signaling the DRAINED condition when more resources
+ * could be made available if the protocol implements output
+ * contention (e.g. the write side of a message queue waiting for the
+ * consumer to release message slots). Conversely, a producer would
+ * wait for the DRAINED condition to be satisfied, issuing GRANT
+ * signals once more resources have been made available to the
+ * consumer.
+ *
+ * Implementation-wise, the monitor logic is shared with the Cobalt
+ * thread object.
+ */
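+
+/*
+ * A rough, illustrative calling sequence (simplified; the actual
+ * signaling of the shared state is done by the userland support
+ * library, not shown here):
+ *
+ *	consumer:
+ *		monitor_enter(mon);
+ *		while (no resource available)
+ *			monitor_wait(mon, <grant event>); // drops the gate, sleeps
+ *		consume();
+ *		monitor_exit(mon);
+ *
+ *	producer:
+ *		monitor_enter(mon);
+ *		produce();
+ *		// raise COBALT_MONITOR_GRANTED|COBALT_MONITOR_SIGNALED in
+ *		// the shared state; monitor_exit() then wakes up the
+ *		// satisfied waiter(s).
+ *		monitor_exit(mon);
+ */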
+COBALT_SYSCALL(monitor_init, current,
+	       (struct cobalt_monitor_shadow __user *u_mon,
+		clockid_t clk_id, int flags))
+{
+	struct cobalt_monitor_shadow shadow;
+	struct cobalt_monitor_state *state;
+	struct cobalt_monitor *mon;
+	int pshared, tmode, ret;
+	struct cobalt_umm *umm;
+	unsigned long stateoff;
+	spl_t s;
+
+	tmode = clock_flag(TIMER_ABSTIME, clk_id);
+	if (tmode < 0)
+		return -EINVAL;
+
+	mon = xnmalloc(sizeof(*mon));
+	if (mon == NULL)
+		return -ENOMEM;
+
+	pshared = (flags & COBALT_MONITOR_SHARED) != 0;
+	umm = &cobalt_ppd_get(pshared)->umm;
+	state = cobalt_umm_alloc(umm, sizeof(*state));
+	if (state == NULL) {
+		xnfree(mon);
+		return -EAGAIN;
+	}
+
+	ret = xnregistry_enter_anon(mon, &mon->resnode.handle);
+	if (ret) {
+		cobalt_umm_free(umm, state);
+		xnfree(mon);
+		return ret;
+	}
+
+	mon->state = state;
+	xnsynch_init(&mon->gate, XNSYNCH_PI, &state->owner);
+	xnsynch_init(&mon->drain, XNSYNCH_PRIO, NULL);
+	mon->flags = flags;
+	mon->tmode = tmode;
+	INIT_LIST_HEAD(&mon->waiters);
+
+	xnlock_get_irqsave(&nklock, s);
+	cobalt_add_resource(&mon->resnode, monitor, pshared);
+	mon->magic = COBALT_MONITOR_MAGIC;
+	xnlock_put_irqrestore(&nklock, s);
+
+	state->flags = 0;
+	stateoff = cobalt_umm_offset(umm, state);
+	XENO_BUG_ON(COBALT, stateoff != (__u32)stateoff);
+	shadow.flags = flags;
+	shadow.handle = mon->resnode.handle;
+	shadow.state_offset = (__u32)stateoff;
+
+	return cobalt_copy_to_user(u_mon, &shadow, sizeof(*u_mon));
+}
+
+/* nklock held, irqs off */
+static int monitor_enter(xnhandle_t handle, struct xnthread *curr)
+{
+	struct cobalt_monitor *mon;
+	int info;
+
+	mon = xnregistry_lookup(handle, NULL); /* (Re)validate. */
+	if (mon == NULL || mon->magic != COBALT_MONITOR_MAGIC)
+		return -EINVAL;
+
+	info = xnsynch_acquire(&mon->gate, XN_INFINITE, XN_RELATIVE);
+	if (info)
+		/* Break or error, no timeout possible. */
+		return info & XNBREAK ? -EINTR : -EINVAL;
+
+	mon->state->flags &= ~(COBALT_MONITOR_SIGNALED|COBALT_MONITOR_BROADCAST);
+
+	return 0;
+}
+
+COBALT_SYSCALL(monitor_enter, primary,
+	       (struct cobalt_monitor_shadow __user *u_mon))
+{
+	struct xnthread *curr = xnthread_current();
+	xnhandle_t handle;
+	int ret;
+	spl_t s;
+
+	handle = cobalt_get_handle_from_user(&u_mon->handle);
+
+	xnlock_get_irqsave(&nklock, s);
+	ret = monitor_enter(handle, curr);
+	xnlock_put_irqrestore(&nklock, s);
+
+	return ret;
+}
+
+/* nklock held, irqs off */
+static void monitor_wakeup(struct cobalt_monitor *mon)
+{
+	struct cobalt_monitor_state *state = mon->state;
+	struct cobalt_thread *thread, *tmp;
+	struct xnthread *p;
+	int bcast;
+
+	/*
+	 * Having the GRANT signal pending does not necessarily mean
+	 * that somebody is actually waiting for it, so we have to
+	 * check both conditions below.
+	 */
+	bcast = (state->flags & COBALT_MONITOR_BROADCAST) != 0;
+	if ((state->flags & COBALT_MONITOR_GRANTED) == 0 ||
+	    list_empty(&mon->waiters))
+		goto drain;
+
+	/*
+	 * Unblock waiters requesting a grant: either only those which
+	 * actually received it, or all of them, depending on the
+	 * broadcast bit.
+	 *
+	 * We update the PENDED flag to inform userland about the
+	 * presence of waiters, so that it may decide not to issue any
+	 * syscall for exiting the monitor if nobody else is waiting
+	 * at the gate.
+	 */
+	list_for_each_entry_safe(thread, tmp, &mon->waiters, monitor_link) {
+		p = &thread->threadbase;
+		/*
+		 * A thread might have received a grant signal while not
+		 * waiting on the monitor, or it might have timed out
+		 * before we got here, so we really have to check
+		 * that ->wchan does match our sleep queue.
+		 */
+		if (bcast ||
+		    (p->u_window->grant_value && p->wchan == &thread->monitor_synch)) {
+			xnsynch_wakeup_this_sleeper(&thread->monitor_synch, p);
+			list_del_init(&thread->monitor_link);
+		}
+	}
+drain:
+	/*
+	 * Unblock threads waiting for a drain event if that signal is
+	 * pending, either one or all, depending on the broadcast
+	 * flag.
+	 */
+	if ((state->flags & COBALT_MONITOR_DRAINED) != 0 &&
+	    xnsynch_pended_p(&mon->drain)) {
+		if (bcast)
+			xnsynch_flush(&mon->drain, 0);
+		else
+			xnsynch_wakeup_one_sleeper(&mon->drain);
+	}
+
+	if (list_empty(&mon->waiters) && !xnsynch_pended_p(&mon->drain))
+		state->flags &= ~COBALT_MONITOR_PENDED;
+}
+
+int __cobalt_monitor_wait(struct cobalt_monitor_shadow __user *u_mon,
+			  int event, const struct timespec64 *ts,
+			  int __user *u_ret)
+{
+	struct cobalt_thread *curr = cobalt_current_thread();
+	struct cobalt_monitor_state *state;
+	xnticks_t timeout = XN_INFINITE;
+	int ret = 0, opret = 0, info;
+	struct cobalt_monitor *mon;
+	struct xnsynch *synch;
+	xnhandle_t handle;
+	xntmode_t tmode;
+	spl_t s;
+
+	handle = cobalt_get_handle_from_user(&u_mon->handle);
+
+	if (ts) {
+		if (!timespec64_valid(ts))
+			return -EINVAL;
+
+		timeout = ts2ns(ts) + 1;
+	}
+
+	xnlock_get_irqsave(&nklock, s);
+
+	mon = xnregistry_lookup(handle, NULL);
+	if (mon == NULL || mon->magic != COBALT_MONITOR_MAGIC) {
+		ret = -EINVAL;
+		goto out;
+	}
+
+	/*
+	 * The current thread might have sent signals to the monitor
+	 * it wants to sleep on: wake up satisfied waiters before
+	 * going to sleep.
+	 */
+	state = mon->state;
+	if (state->flags & COBALT_MONITOR_SIGNALED)
+		monitor_wakeup(mon);
+
+	synch = &curr->monitor_synch;
+	if (event & COBALT_MONITOR_WAITDRAIN)
+		synch = &mon->drain;
+	else {
+		curr->threadbase.u_window->grant_value = 0;
+		list_add_tail(&curr->monitor_link, &mon->waiters);
+	}
+
+	/*
+	 * Tell userland that somebody is now waiting for a signal, so
+	 * that later exiting the monitor on the producer side will
+	 * trigger a wakeup syscall.
+	 *
+	 * CAUTION: we must raise the PENDED flag while holding the
+	 * gate mutex, to prevent a signal from sneaking in from a
+	 * remote CPU without the producer issuing the corresponding
+	 * wakeup call when dropping the gate lock.
+	 */
+	state->flags |= COBALT_MONITOR_PENDED;
+
+	tmode = ts ? mon->tmode : XN_RELATIVE;
+
+	/* Release the gate prior to waiting, all atomically. */
+	xnsynch_release(&mon->gate, &curr->threadbase);
+
+	info = xnsynch_sleep_on(synch, timeout, tmode);
+	if (info) {
+		if ((event & COBALT_MONITOR_WAITDRAIN) == 0 &&
+		    !list_empty(&curr->monitor_link))
+			list_del_init(&curr->monitor_link);
+
+		if (list_empty(&mon->waiters) && !xnsynch_pended_p(&mon->drain))
+			state->flags &= ~COBALT_MONITOR_PENDED;
+
+		if (info & XNBREAK) {
+			opret = -EINTR;
+			goto out;
+		}
+		if (info & XNTIMEO)
+			opret = -ETIMEDOUT;
+	}
+
+	ret = monitor_enter(handle, &curr->threadbase);
+out:
+	xnlock_put_irqrestore(&nklock, s);
+
+	__xn_put_user(opret, u_ret);
+
+	return ret;
+}
+
+int __cobalt_monitor_wait64(struct cobalt_monitor_shadow __user *u_mon,
+			    int event,
+			    const struct __kernel_timespec __user *u_ts,
+			    int __user *u_ret)
+{
+	struct timespec64 ts, *tsp = NULL;
+	int ret;
+
+	if (u_ts) {
+		tsp = &ts;
+		ret = cobalt_get_timespec64(&ts, u_ts);
+		if (ret)
+			return ret;
+	}
+
+	return __cobalt_monitor_wait(u_mon, event, tsp, u_ret);
+}
+
+COBALT_SYSCALL(monitor_wait, nonrestartable,
+	       (struct cobalt_monitor_shadow __user *u_mon,
+	       int event, const struct __user_old_timespec __user *u_ts,
+	       int __user *u_ret))
+{
+	struct timespec64 ts, *tsp = NULL;
+	int ret;
+
+	if (u_ts) {
+		tsp = &ts;
+		ret = cobalt_get_u_timespec(&ts, u_ts);
+		if (ret)
+			return ret;
+	}
+
+	return __cobalt_monitor_wait(u_mon, event, tsp, u_ret);
+}
+
+COBALT_SYSCALL(monitor_wait64, nonrestartable,
+	       (struct cobalt_monitor_shadow __user *u_mon, int event,
+		const struct __kernel_timespec __user *u_ts, int __user *u_ret))
+{
+	return __cobalt_monitor_wait64(u_mon, event, u_ts, u_ret);
+}
+
+COBALT_SYSCALL(monitor_sync, nonrestartable,
+	       (struct cobalt_monitor_shadow __user *u_mon))
+{
+	struct cobalt_monitor *mon;
+	struct xnthread *curr;
+	xnhandle_t handle;
+	int ret = 0;
+	spl_t s;
+
+	handle = cobalt_get_handle_from_user(&u_mon->handle);
+	curr = xnthread_current();
+
+	xnlock_get_irqsave(&nklock, s);
+
+	mon = xnregistry_lookup(handle, NULL);
+	if (mon == NULL || mon->magic != COBALT_MONITOR_MAGIC)
+		ret = -EINVAL;
+	else if (mon->state->flags & COBALT_MONITOR_SIGNALED) {
+		monitor_wakeup(mon);
+		xnsynch_release(&mon->gate, curr);
+		xnsched_run();
+		ret = monitor_enter(handle, curr);
+	}
+
+	xnlock_put_irqrestore(&nklock, s);
+
+	return ret;
+}
+
+COBALT_SYSCALL(monitor_exit, primary,
+	       (struct cobalt_monitor_shadow __user *u_mon))
+{
+	struct cobalt_monitor *mon;
+	struct xnthread *curr;
+	xnhandle_t handle;
+	int ret = 0;
+	spl_t s;
+
+	handle = cobalt_get_handle_from_user(&u_mon->handle);
+	curr = xnthread_current();
+
+	xnlock_get_irqsave(&nklock, s);
+
+	mon = xnregistry_lookup(handle, NULL);
+	if (mon == NULL || mon->magic != COBALT_MONITOR_MAGIC)
+		ret = -EINVAL;
+	else {
+		if (mon->state->flags & COBALT_MONITOR_SIGNALED)
+			monitor_wakeup(mon);
+
+		xnsynch_release(&mon->gate, curr);
+		xnsched_run();
+	}
+
+	xnlock_put_irqrestore(&nklock, s);
+
+	return ret;
+}
+
+COBALT_SYSCALL(monitor_destroy, primary,
+	       (struct cobalt_monitor_shadow __user *u_mon))
+{
+	struct cobalt_monitor_state *state;
+	struct cobalt_monitor *mon;
+	struct xnthread *curr;
+	xnhandle_t handle;
+	int ret = 0;
+	spl_t s;
+
+	handle = cobalt_get_handle_from_user(&u_mon->handle);
+	curr = xnthread_current();
+
+	xnlock_get_irqsave(&nklock, s);
+
+	mon = xnregistry_lookup(handle, NULL);
+	if (mon == NULL || mon->magic != COBALT_MONITOR_MAGIC) {
+		ret = -EINVAL;
+		goto fail;
+	}
+
+	state = mon->state;
+	if ((state->flags & COBALT_MONITOR_PENDED) != 0 ||
+	    xnsynch_pended_p(&mon->drain) || !list_empty(&mon->waiters)) {
+		ret = -EBUSY;
+		goto fail;
+	}
+
+	/*
+	 * A monitor must be destroyed by the thread currently holding
+	 * its gate lock.
+	 */
+	if (xnsynch_owner_check(&mon->gate, curr)) {
+		ret = -EPERM;
+		goto fail;
+	}
+
+	cobalt_monitor_reclaim(&mon->resnode, s); /* drops lock */
+
+	xnsched_run();
+
+	return 0;
+ fail:
+	xnlock_put_irqrestore(&nklock, s);
+
+	return ret;
+}
+
+void cobalt_monitor_reclaim(struct cobalt_resnode *node, spl_t s)
+{
+	struct cobalt_monitor *mon;
+	struct cobalt_umm *umm;
+	int pshared;
+
+	mon = container_of(node, struct cobalt_monitor, resnode);
+	pshared = (mon->flags & COBALT_MONITOR_SHARED) != 0;
+	xnsynch_destroy(&mon->gate);
+	xnsynch_destroy(&mon->drain);
+	xnregistry_remove(node->handle);
+	cobalt_del_resource(node);
+	cobalt_mark_deleted(mon);
+	xnlock_put_irqrestore(&nklock, s);
+
+	umm = &cobalt_ppd_get(pshared)->umm;
+	cobalt_umm_free(umm, mon->state);
+	xnfree(mon);
+}
+++ linux-patched/kernel/xenomai/posix/syscall32.h	2022-03-21 12:58:28.980892950 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/kernel/xenomai/posix/gen-syscall-entries.sh	1970-01-01 01:00:00.000000000 +0100
+/*
+ * Copyright (C) 2014 Philippe Gerum <rpm@xenomai.org>
+ *
+ * Xenomai is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * Xenomai is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+#ifndef _COBALT_POSIX_SYSCALL32_H
+#define _COBALT_POSIX_SYSCALL32_H
+
+#include <cobalt/kernel/compat.h>
+
+struct cobalt_mutex_shadow;
+struct cobalt_event_shadow;
+struct cobalt_cond_shadow;
+struct cobalt_sem_shadow;
+struct cobalt_monitor_shadow;
+
+COBALT_SYSCALL32emu_DECL(thread_create,
+			 (compat_ulong_t pth,
+			  int policy,
+			  const struct compat_sched_param_ex __user *u_param_ex,
+			  int xid,
+			  __u32 __user *u_winoff));
+
+COBALT_SYSCALL32emu_DECL(thread_setschedparam_ex,
+			 (compat_ulong_t pth,
+			  int policy,
+			  const struct compat_sched_param_ex __user *u_param,
+			  __u32 __user *u_winoff,
+			  int __user *u_promoted));
+
+COBALT_SYSCALL32emu_DECL(thread_getschedparam_ex,
+			 (compat_ulong_t pth,
+			  int __user *u_policy,
+			  struct compat_sched_param_ex __user *u_param));
+
+COBALT_SYSCALL32emu_DECL(thread_setschedprio,
+			 (compat_ulong_t pth,
+			  int prio,
+			  __u32 __user *u_winoff,
+			  int __user *u_promoted));
+
+COBALT_SYSCALL32emu_DECL(clock_getres,
+			 (clockid_t clock_id,
+			  struct old_timespec32 __user *u_ts));
+
+COBALT_SYSCALL32emu_DECL(clock_getres64,
+			 (clockid_t clock_id,
+			  struct __kernel_timespec __user *u_ts));
+
+COBALT_SYSCALL32emu_DECL(clock_gettime,
+			 (clockid_t clock_id,
+			  struct old_timespec32 __user *u_ts));
+
+COBALT_SYSCALL32emu_DECL(clock_gettime64,
+			 (clockid_t clock_id,
+			  struct __kernel_timespec __user *u_ts));
+
+COBALT_SYSCALL32emu_DECL(clock_settime,
+			 (clockid_t clock_id,
+			  const struct old_timespec32 __user *u_ts));
+
+COBALT_SYSCALL32emu_DECL(clock_settime64,
+			 (clockid_t clock_id,
+			  const struct __kernel_timespec __user *u_ts));
+
+COBALT_SYSCALL32emu_DECL(clock_adjtime,
+			 (clockid_t clock_id,
+			  struct old_timex32 __user *u_tx));
+
+COBALT_SYSCALL32emu_DECL(clock_adjtime64,
+			 (clockid_t clock_id,
+			  struct __kernel_timex __user *u_tx));
+
+COBALT_SYSCALL32emu_DECL(clock_nanosleep,
+			 (clockid_t clock_id, int flags,
+			  const struct old_timespec32 __user *u_rqt,
+			  struct old_timespec32 __user *u_rmt));
+
+COBALT_SYSCALL32emu_DECL(clock_nanosleep64,
+			 (clockid_t clock_id, int flags,
+			  const struct __kernel_timespec __user *u_rqt,
+			  struct __kernel_timespec __user *u_rmt));
+
+
+COBALT_SYSCALL32emu_DECL(mutex_timedlock,
+			 (struct cobalt_mutex_shadow __user *u_mx,
+			  const struct old_timespec32 __user *u_ts));
+
+COBALT_SYSCALL32emu_DECL(mutex_timedlock64,
+			 (struct cobalt_mutex_shadow __user *u_mx,
+			  const struct __kernel_timespec __user *u_ts));
+
+COBALT_SYSCALL32emu_DECL(cond_wait_prologue,
+			 (struct cobalt_cond_shadow __user *u_cnd,
+			  struct cobalt_mutex_shadow __user *u_mx,
+			  int *u_err,
+			  unsigned int timed,
+			  struct old_timespec32 __user *u_ts));
+
+COBALT_SYSCALL32emu_DECL(mq_open,
+			 (const char __user *u_name, int oflags,
+			  mode_t mode, struct compat_mq_attr __user *u_attr));
+
+COBALT_SYSCALL32emu_DECL(mq_getattr,
+			 (mqd_t uqd, struct compat_mq_attr __user *u_attr));
+
+COBALT_SYSCALL32emu_DECL(mq_timedsend,
+			 (mqd_t uqd, const void __user *u_buf, size_t len,
+			  unsigned int prio,
+			  const struct old_timespec32 __user *u_ts));
+
+COBALT_SYSCALL32emu_DECL(mq_timedsend64,
+			 (mqd_t uqd, const void __user *u_buf, size_t len,
+			  unsigned int prio,
+			  const struct __kernel_timespec __user *u_ts));
+
+COBALT_SYSCALL32emu_DECL(mq_timedreceive,
+			 (mqd_t uqd, void __user *u_buf,
+			  compat_ssize_t __user *u_len,
+			  unsigned int __user *u_prio,
+			  const struct old_timespec32 __user *u_ts));
+
+COBALT_SYSCALL32emu_DECL(mq_timedreceive64,
+			 (mqd_t uqd, void __user *u_buf, ssize_t __user *u_len,
+			  unsigned int __user *u_prio,
+			  const struct __kernel_timespec __user *u_ts));
+
+COBALT_SYSCALL32emu_DECL(mq_notify,
+			 (mqd_t fd, const struct compat_sigevent *__user u_cev));
+
+COBALT_SYSCALL32emu_DECL(sched_weightprio,
+			 (int policy,
+			  const struct compat_sched_param_ex __user *u_param));
+
+COBALT_SYSCALL32emu_DECL(sched_setconfig_np,
+			 (int cpu, int policy,
+			  union compat_sched_config __user *u_config,
+			  size_t len));
+
+COBALT_SYSCALL32emu_DECL(sched_getconfig_np,
+			 (int cpu, int policy,
+			  union compat_sched_config __user *u_config,
+			  size_t len));
+
+COBALT_SYSCALL32emu_DECL(sched_setscheduler_ex,
+			 (compat_pid_t pid,
+			  int policy,
+			  const struct compat_sched_param_ex __user *u_param,
+			  __u32 __user *u_winoff,
+			  int __user *u_promoted));
+
+COBALT_SYSCALL32emu_DECL(sched_getscheduler_ex,
+			 (compat_pid_t pid,
+			  int __user *u_policy,
+			  struct compat_sched_param_ex __user *u_param));
+
+COBALT_SYSCALL32emu_DECL(timer_create,
+			 (clockid_t clock,
+			  const struct compat_sigevent __user *u_sev,
+			  timer_t __user *u_tm));
+
+COBALT_SYSCALL32emu_DECL(timer_settime,
+			 (timer_t tm, int flags,
+			  const struct old_itimerspec32 __user *u_newval,
+			  struct old_itimerspec32 __user *u_oldval));
+
+COBALT_SYSCALL32emu_DECL(timer_gettime,
+			 (timer_t tm,
+			  struct old_itimerspec32 __user *u_val));
+
+COBALT_SYSCALL32emu_DECL(timerfd_settime,
+			 (int fd, int flags,
+			  const struct old_itimerspec32 __user *new_value,
+			  struct old_itimerspec32 __user *old_value));
+
+COBALT_SYSCALL32emu_DECL(timerfd_gettime,
+			 (int fd, struct old_itimerspec32 __user *value));
+
+COBALT_SYSCALL32emu_DECL(sigwait,
+			 (const compat_sigset_t __user *u_set,
+			  int __user *u_sig));
+
+COBALT_SYSCALL32emu_DECL(sigtimedwait,
+			 (const compat_sigset_t __user *u_set,
+			  struct compat_siginfo __user *u_si,
+			  const struct old_timespec32 __user *u_timeout));
+
+COBALT_SYSCALL32emu_DECL(sigtimedwait64,
+			 (const compat_sigset_t __user *u_set,
+			  struct compat_siginfo __user *u_si,
+			  const struct __kernel_timespec __user *u_timeout));
+
+COBALT_SYSCALL32emu_DECL(sigwaitinfo,
+			 (const compat_sigset_t __user *u_set,
+			  struct compat_siginfo __user *u_si));
+
+COBALT_SYSCALL32emu_DECL(sigpending,
+			 (compat_old_sigset_t __user *u_set));
+
+COBALT_SYSCALL32emu_DECL(sigqueue,
+			 (pid_t pid, int sig,
+			  const union compat_sigval __user *u_value));
+
+COBALT_SYSCALL32emu_DECL(monitor_wait,
+			 (struct cobalt_monitor_shadow __user *u_mon,
+			  int event, const struct old_timespec32 __user *u_ts,
+			  int __user *u_ret));
+
+COBALT_SYSCALL32emu_DECL(monitor_wait64,
+			 (struct cobalt_monitor_shadow __user *u_mon,
+			  int event,
+			  const struct __kernel_timespec __user *u_ts,
+			  int __user *u_ret));
+
+COBALT_SYSCALL32emu_DECL(event_wait,
+			 (struct cobalt_event_shadow __user *u_event,
+			  unsigned int bits,
+			  unsigned int __user *u_bits_r,
+			  int mode, const struct old_timespec32 __user *u_ts));
+
+COBALT_SYSCALL32emu_DECL(event_wait64,
+			 (struct cobalt_event_shadow __user *u_event,
+			  unsigned int bits,
+			  unsigned int __user *u_bits_r,
+			  int mode,
+			  const struct __kernel_timespec __user *u_ts));
+
+COBALT_SYSCALL32emu_DECL(select,
+			 (int nfds,
+			  compat_fd_set __user *u_rfds,
+			  compat_fd_set __user *u_wfds,
+			  compat_fd_set __user *u_xfds,
+			  struct old_timeval32 __user *u_tv));
+
+COBALT_SYSCALL32emu_DECL(recvmsg,
+			 (int fd, struct compat_msghdr __user *umsg,
+			  int flags));
+
+COBALT_SYSCALL32emu_DECL(recvmmsg,
+			 (int fd, struct compat_mmsghdr __user *u_msgvec,
+			  unsigned int vlen,
+			  unsigned int flags, struct old_timespec32 *u_timeout));
+
+COBALT_SYSCALL32emu_DECL(recvmmsg64,
+			 (int fd, struct compat_mmsghdr __user *u_msgvec,
+			  unsigned int vlen,
+			  unsigned int flags,
+			  struct __kernel_timespec *u_timeout));
+
+COBALT_SYSCALL32emu_DECL(sendmsg,
+			 (int fd, struct compat_msghdr __user *umsg,
+			  int flags));
+
+COBALT_SYSCALL32emu_DECL(sendmmsg,
+			 (int fd, struct compat_mmsghdr __user *u_msgvec, unsigned int vlen,
+			  unsigned int flags));
+
+COBALT_SYSCALL32emu_DECL(mmap,
+			 (int fd,
+			  struct compat_rtdm_mmap_request __user *u_rma,
+			  compat_uptr_t __user *u_addrp));
+
+COBALT_SYSCALL32emu_DECL(backtrace,
+			 (int nr, compat_ulong_t __user *u_backtrace,
+			  int reason));
+
+COBALT_SYSCALL32emu_DECL(sem_open,
+			 (compat_uptr_t __user *u_addrp,
+			  const char __user *u_name,
+			  int oflags, mode_t mode, unsigned int value));
+
+COBALT_SYSCALL32emu_DECL(sem_timedwait,
+			 (struct cobalt_sem_shadow __user *u_sem,
+			  const struct old_timespec32 __user *u_ts));
+
+COBALT_SYSCALL32emu_DECL(sem_timedwait64,
+			 (struct cobalt_sem_shadow __user *u_sem,
+			  const struct __kernel_timespec __user *u_ts));
+
+#endif /* !_COBALT_POSIX_SYSCALL32_H */
+++ linux-patched/kernel/xenomai/posix/gen-syscall-entries.sh	2022-03-21 12:58:28.977892979 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/kernel/xenomai/posix/mutex.c	1970-01-01 01:00:00.000000000 +0100
+#! /bin/sh
+
+set -e
+
+shift
+
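+# The awk program below scans the C sources passed on the command line
+# (the first argument is dropped by the "shift" above) and turns every
+# COBALT_SYSCALL(<name>, <mode>, ...) definition into two generated
+# macro lists.  For instance, a definition such as
+#
+#	COBALT_SYSCALL(clock_gettime, current, ...)
+#
+# contributes (sketch of the generated output):
+#
+#	__COBALT_CALL_ENTRY(clock_gettime)	to __COBALT_CALL_ENTRIES
+#	__COBALT_MODE(clock_gettime, current)	to __COBALT_CALL_MODES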
+awk '
+match($0, /COBALT_SYSCALL\([^,]*,[ \t]*[^,]*/)  {
+	str=substr($0, RSTART + 15, RLENGTH - 15)
+	match(str, /[^, \t]*/)
+	syscall=substr(str, RSTART, RLENGTH)
+
+	if (syscall == "") {
+		print "Failed to find syscall name in line " $0 > "/dev/stderr"
+		exit 1
+	}
+
+	calls = calls "	__COBALT_CALL_ENTRY(" syscall ") \\\n"
+	modes = modes "	__COBALT_MODE(" str ") \\\n"
+	next
+}
+
+/COBALT_SYSCALL\(/  {
+	print "Failed to parse line " $0 > "/dev/stderr"
+	exit 1
+}
+
+END {
+	print "#define __COBALT_CALL_ENTRIES \\\n" calls "	/* end */"
+	print "#define __COBALT_CALL_MODES \\\n" modes "	/* end */"
+}
+' $*
+++ linux-patched/kernel/xenomai/posix/mutex.c	2022-03-21 12:58:28.973893018 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/kernel/xenomai/posix/io.h	1970-01-01 01:00:00.000000000 +0100
+/*
+ * Written by Gilles Chanteperdrix <gilles.chanteperdrix@xenomai.org>.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+#include "internal.h"
+#include "thread.h"
+#include "mutex.h"
+#include "cond.h"
+#include "clock.h"
+#include <cobalt/kernel/time.h>
+
+static int cobalt_mutex_init_inner(struct cobalt_mutex_shadow *shadow,
+				   struct cobalt_mutex *mutex,
+				   struct cobalt_mutex_state *state,
+				   const struct cobalt_mutexattr *attr)
+{
+	int synch_flags = XNSYNCH_PRIO | XNSYNCH_OWNER;
+	struct cobalt_umm *umm;
+	spl_t s;
+	int ret;
+
+	ret = xnregistry_enter_anon(mutex, &mutex->resnode.handle);
+	if (ret < 0)
+		return ret;
+
+	umm = &cobalt_ppd_get(attr->pshared)->umm;
+	shadow->handle = mutex->resnode.handle;
+	shadow->magic = COBALT_MUTEX_MAGIC;
+	shadow->lockcnt = 0;
+	shadow->attr = *attr;
+	shadow->state_offset = cobalt_umm_offset(umm, state);
+
+	mutex->magic = COBALT_MUTEX_MAGIC;
+
+	if (attr->protocol == PTHREAD_PRIO_PROTECT) {
+		state->ceiling = attr->ceiling + 1;
+		xnsynch_init_protect(&mutex->synchbase, synch_flags,
+				     &state->owner, &state->ceiling);
+	} else {
+		state->ceiling = 0;
+		if (attr->protocol == PTHREAD_PRIO_INHERIT)
+			synch_flags |= XNSYNCH_PI;
+		xnsynch_init(&mutex->synchbase, synch_flags, &state->owner);
+	}
+
+	state->flags = (attr->type == PTHREAD_MUTEX_ERRORCHECK
+			? COBALT_MUTEX_ERRORCHECK : 0);
+	mutex->attr = *attr;
+	INIT_LIST_HEAD(&mutex->conds);
+
+	xnlock_get_irqsave(&nklock, s);
+	cobalt_add_resource(&mutex->resnode, mutex, attr->pshared);
+	xnlock_put_irqrestore(&nklock, s);
+
+	return 0;
+}
+
+/* must be called with nklock locked, interrupts off. */
+int __cobalt_mutex_acquire_unchecked(struct xnthread *cur,
+				     struct cobalt_mutex *mutex,
+				     const struct timespec64 *ts)
+{
+	int ret;
+
+	if (ts) {
+		if (!timespec64_valid(ts))
+			return -EINVAL;
+		ret = xnsynch_acquire(&mutex->synchbase, ts2ns(ts) + 1, XN_REALTIME);
+	} else
+		ret = xnsynch_acquire(&mutex->synchbase, XN_INFINITE, XN_RELATIVE);
+
+	if (ret) {
+		if (ret & XNBREAK)
+			return -EINTR;
+		if (ret & XNTIMEO)
+			return -ETIMEDOUT;
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+int cobalt_mutex_release(struct xnthread *curr,
+			 struct cobalt_mutex *mutex)
+{	/* nklock held, irqs off */
+	struct cobalt_mutex_state *state;
+	struct cobalt_cond *cond;
+	unsigned long flags;
+	int need_resched;
+
+	if (!cobalt_obj_active(mutex, COBALT_MUTEX_MAGIC, struct cobalt_mutex))
+		 return -EINVAL;
+
+	if (mutex->resnode.scope !=
+	    cobalt_current_resources(mutex->attr.pshared))
+		return -EPERM;
+
+	/*
+	 * We are about to release a mutex which is still pending PP
+	 * (i.e. we never got scheduled out while holding it). Clear
+	 * the lazy handle.
+	 */
+	if (mutex->resnode.handle == curr->u_window->pp_pending)
+		curr->u_window->pp_pending = XN_NO_HANDLE;
+
+	state = container_of(mutex->synchbase.fastlock, struct cobalt_mutex_state, owner);
+	flags = state->flags;
+	need_resched = 0;
+	if ((flags & COBALT_MUTEX_COND_SIGNAL)) {
+		state->flags = flags & ~COBALT_MUTEX_COND_SIGNAL;
+		if (!list_empty(&mutex->conds)) {
+			list_for_each_entry(cond, &mutex->conds, mutex_link)
+				need_resched |=
+				cobalt_cond_deferred_signals(cond);
+		}
+	}
+	need_resched |= xnsynch_release(&mutex->synchbase, curr);
+
+	return need_resched;
+}
+
+int __cobalt_mutex_timedlock_break(struct cobalt_mutex_shadow __user *u_mx,
+				   const void __user *u_ts,
+				   int (*fetch_timeout)(struct timespec64 *ts,
+							const void __user *u_ts))
+{
+	struct xnthread *curr = xnthread_current();
+	struct timespec64 ts, *tsp = NULL;
+	struct cobalt_mutex *mutex;
+	xnhandle_t handle;
+	spl_t s;
+	int ret;
+
+	/* We need a valid thread handle for the fast lock. */
+	if (curr->handle == XN_NO_HANDLE)
+		return -EPERM;
+
+	handle = cobalt_get_handle_from_user(&u_mx->handle);
+redo:
+	xnlock_get_irqsave(&nklock, s);
+
+	mutex = xnregistry_lookup(handle, NULL);
+	if (!cobalt_obj_active(mutex, COBALT_MUTEX_MAGIC, struct cobalt_mutex)) {
+		ret = -EINVAL;
+		goto out;
+	}
+
+	if (mutex->resnode.scope !=
+	    cobalt_current_resources(mutex->attr.pshared)) {
+		ret = -EPERM;
+		goto out;
+	}
+
+	xnthread_commit_ceiling(curr);
+
+	if (xnsynch_owner_check(&mutex->synchbase, curr)) {
+		/* Check if we can take the mutex immediately */
+		ret = xnsynch_try_acquire(&mutex->synchbase);
+		if (ret != -EBUSY)
+			goto out;
+
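+		/*
+		 * Fetching the timeout copies data from userland, which
+		 * may fault; so drop the nklock first, then restart the
+		 * whole sequence ("redo") since the mutex may have
+		 * vanished while the lock was released.
+		 */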
+		if (fetch_timeout) {
+			xnlock_put_irqrestore(&nklock, s);
+			ret = fetch_timeout(&ts, u_ts);
+			if (ret)
+				return ret;
+
+			fetch_timeout = NULL;
+			tsp = &ts;
+			goto redo; /* Revalidate handle. */
+		}
+		ret = __cobalt_mutex_acquire_unchecked(curr, mutex, tsp);
+		xnlock_put_irqrestore(&nklock, s);
+		return ret;
+	}
+
+	/* We already own the mutex, something looks wrong. */
+
+	ret = -EBUSY;
+	switch(mutex->attr.type) {
+	case PTHREAD_MUTEX_NORMAL:
+		/* Attempting to relock a normal mutex, deadlock. */
+		if (IS_ENABLED(XENO_OPT_DEBUG_USER))
+			printk(XENO_WARNING
+			       "thread %s deadlocks on non-recursive mutex\n",
+			       curr->name);
+		/* Make the caller hang. */
+		__cobalt_mutex_acquire_unchecked(curr, mutex, NULL);
+		break;
+
+	case PTHREAD_MUTEX_ERRORCHECK:
+	case PTHREAD_MUTEX_RECURSIVE:
+		/*
+		 * Recursive mutexes are handled in user-space, so
+		 * these cases should never happen.
+		 */
+		ret = -EINVAL;
+		break;
+	}
+out:
+	xnlock_put_irqrestore(&nklock, s);
+
+	return ret;
+}
+
+COBALT_SYSCALL(mutex_check_init, current,
+	       (struct cobalt_mutex_shadow __user *u_mx))
+{
+	struct cobalt_mutex *mutex;
+	xnhandle_t handle;
+	int err;
+	spl_t s;
+
+	handle = cobalt_get_handle_from_user(&u_mx->handle);
+
+	xnlock_get_irqsave(&nklock, s);
+	mutex = xnregistry_lookup(handle, NULL);
+	if (cobalt_obj_active(mutex, COBALT_MUTEX_MAGIC, typeof(*mutex)))
+		/* mutex is already in a queue. */
+		err = -EBUSY;
+	else
+		err = 0;
+
+	xnlock_put_irqrestore(&nklock, s);
+	return err;
+}
+
+COBALT_SYSCALL(mutex_init, current,
+	       (struct cobalt_mutex_shadow __user *u_mx,
+		const struct cobalt_mutexattr __user *u_attr))
+{
+	struct cobalt_mutex_state *state;
+	struct cobalt_mutex_shadow mx;
+	struct cobalt_mutexattr attr;
+	struct cobalt_mutex *mutex;
+	int ret;
+
+	if (cobalt_copy_from_user(&mx, u_mx, sizeof(mx)))
+		return -EFAULT;
+
+	if (cobalt_copy_from_user(&attr, u_attr, sizeof(attr)))
+		return -EFAULT;
+
+	mutex = xnmalloc(sizeof(*mutex));
+	if (mutex == NULL)
+		return -ENOMEM;
+
+	state = cobalt_umm_alloc(&cobalt_ppd_get(attr.pshared)->umm,
+				 sizeof(*state));
+	if (state == NULL) {
+		xnfree(mutex);
+		return -EAGAIN;
+	}
+
+	ret = cobalt_mutex_init_inner(&mx, mutex, state, &attr);
+	if (ret) {
+		xnfree(mutex);
+		cobalt_umm_free(&cobalt_ppd_get(attr.pshared)->umm, state);
+		return ret;
+	}
+
+	return cobalt_copy_to_user(u_mx, &mx, sizeof(*u_mx));
+}
+
+COBALT_SYSCALL(mutex_destroy, current,
+	       (struct cobalt_mutex_shadow __user *u_mx))
+{
+	struct cobalt_mutex_shadow mx;
+	struct cobalt_mutex *mutex;
+	spl_t s;
+	int ret;
+
+	if (cobalt_copy_from_user(&mx, u_mx, sizeof(mx)))
+		return -EFAULT;
+
+	xnlock_get_irqsave(&nklock, s);
+
+	mutex = xnregistry_lookup(mx.handle, NULL);
+	if (!cobalt_obj_active(mutex, COBALT_MUTEX_MAGIC, typeof(*mutex))) {
+		ret = -EINVAL;
+		goto fail;
+	}
+	if (cobalt_current_resources(mutex->attr.pshared) !=
+	    mutex->resnode.scope) {
+		ret = -EPERM;
+		goto fail;
+	}
+	if (xnsynch_fast_owner_check(mutex->synchbase.fastlock,
+					XN_NO_HANDLE) != 0 ||
+	    !list_empty(&mutex->conds)) {
+		ret = -EBUSY;
+		goto fail;
+	}
+
+	cobalt_mutex_reclaim(&mutex->resnode, s); /* drops lock */
+
+	cobalt_mark_deleted(&mx);
+
+	return cobalt_copy_to_user(u_mx, &mx, sizeof(*u_mx));
+fail:
+	xnlock_put_irqrestore(&nklock, s);
+
+	return ret;
+}
+
+COBALT_SYSCALL(mutex_trylock, primary,
+	       (struct cobalt_mutex_shadow __user *u_mx))
+{
+	struct xnthread *curr = xnthread_current();
+	struct cobalt_mutex *mutex;
+	xnhandle_t handle;
+	spl_t s;
+	int ret;
+
+	handle = cobalt_get_handle_from_user(&u_mx->handle);
+
+	xnlock_get_irqsave(&nklock, s);
+
+	mutex = xnregistry_lookup(handle, NULL);
+	if (!cobalt_obj_active(mutex, COBALT_MUTEX_MAGIC, typeof(*mutex))) {
+		ret = -EINVAL;
+		goto out;
+	}
+
+	xnthread_commit_ceiling(curr);
+
+	ret = xnsynch_try_acquire(&mutex->synchbase);
+
+out:
+	xnlock_put_irqrestore(&nklock, s);
+
+	return ret;
+}
+
+COBALT_SYSCALL(mutex_lock, primary,
+	       (struct cobalt_mutex_shadow __user *u_mx))
+{
+	return __cobalt_mutex_timedlock_break(u_mx, NULL, NULL);
+}
+
+static inline int mutex_fetch_timeout(struct timespec64 *ts,
+				      const void __user *u_ts)
+{
+	return u_ts == NULL ? -EFAULT : cobalt_get_u_timespec(ts, u_ts);
+}
+
+static inline int mutex_fetch_timeout64(struct timespec64 *ts,
+					const void __user *u_ts)
+{
+	return u_ts == NULL ? -EFAULT : cobalt_get_timespec64(ts, u_ts);
+}
+
+int __cobalt_mutex_timedlock64(struct cobalt_mutex_shadow __user *u_mx,
+			       const void __user *u_ts)
+{
+	return __cobalt_mutex_timedlock_break(u_mx, u_ts,
+					      mutex_fetch_timeout64);
+}
+
+COBALT_SYSCALL(mutex_timedlock, primary,
+	       (struct cobalt_mutex_shadow __user *u_mx,
+		const struct __user_old_timespec __user *u_ts))
+{
+	return __cobalt_mutex_timedlock_break(u_mx, u_ts, mutex_fetch_timeout);
+}
+
+COBALT_SYSCALL(mutex_timedlock64, primary,
+	       (struct cobalt_mutex_shadow __user *u_mx,
+		const struct __kernel_timespec __user *u_ts))
+{
+	return __cobalt_mutex_timedlock64(u_mx, u_ts);
+}
+
+COBALT_SYSCALL(mutex_unlock, nonrestartable,
+	       (struct cobalt_mutex_shadow __user *u_mx))
+{
+	struct cobalt_mutex *mutex;
+	struct xnthread *curr;
+	xnhandle_t handle;
+	int ret;
+	spl_t s;
+
+	handle = cobalt_get_handle_from_user(&u_mx->handle);
+	curr = xnthread_current();
+
+	xnlock_get_irqsave(&nklock, s);
+
+	mutex = xnregistry_lookup(handle, NULL);
+	ret = cobalt_mutex_release(curr, mutex);
+	if (ret > 0) {
+		xnsched_run();
+		ret = 0;
+	}
+
+	xnlock_put_irqrestore(&nklock, s);
+
+	return ret;
+}
+
+void cobalt_mutex_reclaim(struct cobalt_resnode *node, spl_t s)
+{
+	struct cobalt_mutex_state *state;
+	struct cobalt_mutex *mutex;
+	int pshared;
+
+	mutex = container_of(node, struct cobalt_mutex, resnode);
+	state = container_of(mutex->synchbase.fastlock, struct cobalt_mutex_state, owner);
+	pshared = mutex->attr.pshared;
+	xnregistry_remove(node->handle);
+	cobalt_del_resource(node);
+	xnsynch_destroy(&mutex->synchbase);
+	cobalt_mark_deleted(mutex);
+	xnlock_put_irqrestore(&nklock, s);
+
+	cobalt_umm_free(&cobalt_ppd_get(pshared)->umm, state);
+	xnfree(mutex);
+}
+
+struct xnsynch *lookup_lazy_pp(xnhandle_t handle)
+{				/* nklock held, irqs off */
+	struct cobalt_mutex *mutex;
+
+	/* Only mutexes may be PP-enabled. */
+
+	mutex = xnregistry_lookup(handle, NULL);
+	if (mutex == NULL ||
+	    !cobalt_obj_active(mutex, COBALT_MUTEX_MAGIC, struct cobalt_mutex) ||
+	    mutex->attr.protocol != PTHREAD_PRIO_PROTECT)
+		return NULL;
+
+	return &mutex->synchbase;
+}
+++ linux-patched/kernel/xenomai/posix/io.h	2022-03-21 12:58:28.969893057 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/kernel/xenomai/posix/monitor.h	1970-01-01 01:00:00.000000000 +0100
+/*
+ * Copyright (C) 2005 Jan Kiszka <jan.kiszka@web.de>.
+ * Copyright (C) 2005 Joerg Langenberg <joerg.langenberg@gmx.net>.
+ *
+ * Xenomai is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * Xenomai is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Xenomai; if not, write to the Free Software Foundation,
+ * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+#ifndef _COBALT_POSIX_IO_H
+#define _COBALT_POSIX_IO_H
+
+#include <rtdm/rtdm.h>
+#include <xenomai/posix/syscall.h>
+#include <cobalt/kernel/select.h>
+
+int __cobalt_select(int nfds, void __user *u_rfds, void __user *u_wfds,
+		    void __user *u_xfds, void __user *u_tv, bool compat);
+
+COBALT_SYSCALL_DECL(open,
+		    (const char __user *u_path, int oflag));
+
+COBALT_SYSCALL_DECL(socket,
+		    (int protocol_family,
+		     int socket_type, int protocol));
+
+COBALT_SYSCALL_DECL(close, (int fd));
+
+COBALT_SYSCALL_DECL(fcntl, (int fd, int cmd, long arg));
+
+COBALT_SYSCALL_DECL(ioctl,
+		    (int fd, unsigned int request, void __user *arg));
+
+COBALT_SYSCALL_DECL(read,
+		    (int fd, void __user *buf, size_t size));
+
+COBALT_SYSCALL_DECL(write,
+		    (int fd, const void __user *buf, size_t size));
+
+COBALT_SYSCALL_DECL(recvmsg,
+		    (int fd, struct user_msghdr __user *umsg, int flags));
+
+COBALT_SYSCALL_DECL(recvmmsg,
+		    (int fd, struct mmsghdr __user *u_msgvec, unsigned int vlen,
+		     unsigned int flags, struct __user_old_timespec __user *u_timeout));
+
+COBALT_SYSCALL_DECL(recvmmsg64,
+		    (int fd, struct mmsghdr __user *u_msgvec, unsigned int vlen,
+		     unsigned int flags,
+		     struct __kernel_timespec __user *u_timeout));
+
+COBALT_SYSCALL_DECL(sendmsg,
+		    (int fd, struct user_msghdr __user *umsg, int flags));
+
+COBALT_SYSCALL_DECL(sendmmsg,
+		    (int fd, struct mmsghdr __user *u_msgvec,
+		     unsigned int vlen, unsigned int flags));
+
+COBALT_SYSCALL_DECL(mmap,
+		    (int fd, struct _rtdm_mmap_request __user *u_rma,
+		     void __user * __user *u_addrp));
+
+COBALT_SYSCALL_DECL(select,
+		    (int nfds,
+		     fd_set __user *u_rfds,
+		     fd_set __user *u_wfds,
+		     fd_set __user *u_xfds,
+		     struct __kernel_old_timeval __user *u_tv));
+
+#endif /* !_COBALT_POSIX_IO_H */
+++ linux-patched/kernel/xenomai/posix/monitor.h	2022-03-21 12:58:28.966893086 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/kernel/xenomai/posix/cond.h	1970-01-01 01:00:00.000000000 +0100
+/*
+ * Copyright (C) 2011 Philippe Gerum <rpm@xenomai.org>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+#ifndef _COBALT_POSIX_MONITOR_H
+#define _COBALT_POSIX_MONITOR_H
+
+#include <cobalt/kernel/synch.h>
+#include <cobalt/uapi/monitor.h>
+#include <xenomai/posix/syscall.h>
+#include <xenomai/posix/process.h>
+
+struct cobalt_resources;
+struct cobalt_process;
+
+struct cobalt_monitor {
+	unsigned int magic;
+	struct xnsynch gate;
+	struct xnsynch drain;
+	struct cobalt_monitor_state *state;
+	struct list_head waiters;
+	int flags;
+	xntmode_t tmode;
+	struct cobalt_resnode resnode;
+};
+
+int __cobalt_monitor_wait(struct cobalt_monitor_shadow __user *u_mon,
+			  int event, const struct timespec64 *ts,
+			  int __user *u_ret);
+
+int __cobalt_monitor_wait64(struct cobalt_monitor_shadow __user *u_mon,
+			    int event,
+			    const struct __kernel_timespec __user *u_ts,
+			    int __user *u_ret);
+
+COBALT_SYSCALL_DECL(monitor_init,
+		    (struct cobalt_monitor_shadow __user *u_monsh,
+		     clockid_t clk_id,
+		     int flags));
+
+COBALT_SYSCALL_DECL(monitor_enter,
+		    (struct cobalt_monitor_shadow __user *u_monsh));
+
+COBALT_SYSCALL_DECL(monitor_sync,
+		    (struct cobalt_monitor_shadow __user *u_monsh));
+
+COBALT_SYSCALL_DECL(monitor_exit,
+		    (struct cobalt_monitor_shadow __user *u_monsh));
+
+COBALT_SYSCALL_DECL(monitor_wait,
+		    (struct cobalt_monitor_shadow __user *u_monsh,
+		     int event, const struct __user_old_timespec __user *u_ts,
+		     int __user *u_ret));
+
+COBALT_SYSCALL_DECL(monitor_wait64,
+		    (struct cobalt_monitor_shadow __user *u_monsh, int event,
+		     const struct __kernel_timespec __user *u_ts,
+		     int __user *u_ret));
+
+COBALT_SYSCALL_DECL(monitor_destroy,
+		    (struct cobalt_monitor_shadow __user *u_monsh));
+
+void cobalt_monitor_reclaim(struct cobalt_resnode *node,
+			    spl_t s);
+
+#endif /* !_COBALT_POSIX_MONITOR_H */
+++ linux-patched/kernel/xenomai/posix/cond.h	2022-03-21 12:58:28.962893125 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/kernel/xenomai/posix/thread.h	1970-01-01 01:00:00.000000000 +0100
+/*
+ * Written by Gilles Chanteperdrix <gilles.chanteperdrix@xenomai.org>.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+#ifndef _COBALT_POSIX_COND_H
+#define _COBALT_POSIX_COND_H
+
+#include <linux/types.h>
+#include <linux/time.h>
+#include <linux/list.h>
+#include <cobalt/kernel/synch.h>
+#include <cobalt/uapi/thread.h>
+#include <cobalt/uapi/cond.h>
+#include <xenomai/posix/syscall.h>
+#include <xenomai/posix/process.h>
+
+struct cobalt_mutex;
+
+struct cobalt_cond {
+	unsigned int magic;
+	struct xnsynch synchbase;
+	struct list_head mutex_link;
+	struct cobalt_cond_state *state;
+	struct cobalt_condattr attr;
+	struct cobalt_mutex *mutex;
+	struct cobalt_resnode resnode;
+};
+
+int __cobalt_cond_wait_prologue(struct cobalt_cond_shadow __user *u_cnd,
+				struct cobalt_mutex_shadow __user *u_mx,
+				int *u_err,
+				void __user *u_ts,
+				int (*fetch_timeout)(struct timespec64 *ts,
+						     const void __user *u_ts));
+COBALT_SYSCALL_DECL(cond_init,
+		    (struct cobalt_cond_shadow __user *u_cnd,
+		     const struct cobalt_condattr __user *u_attr));
+
+COBALT_SYSCALL_DECL(cond_destroy,
+		    (struct cobalt_cond_shadow __user *u_cnd));
+
+COBALT_SYSCALL_DECL(cond_wait_prologue,
+		    (struct cobalt_cond_shadow __user *u_cnd,
+		     struct cobalt_mutex_shadow __user *u_mx,
+		     int *u_err,
+		     unsigned int timed,
+		     struct __user_old_timespec __user *u_ts));
+
+COBALT_SYSCALL_DECL(cond_wait_epilogue,
+		    (struct cobalt_cond_shadow __user *u_cnd,
+		     struct cobalt_mutex_shadow __user *u_mx));
+
+int cobalt_cond_deferred_signals(struct cobalt_cond *cond);
+
+void cobalt_cond_reclaim(struct cobalt_resnode *node,
+			 spl_t s);
+
+#endif /* !_COBALT_POSIX_COND_H */
+++ linux-patched/kernel/xenomai/posix/thread.h	2022-03-21 12:58:28.958893164 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/kernel/xenomai/posix/memory.h	1970-01-01 01:00:00.000000000 +0100
+/*
+ * Written by Gilles Chanteperdrix <gilles.chanteperdrix@xenomai.org>.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+#ifndef _COBALT_POSIX_THREAD_H
+#define _COBALT_POSIX_THREAD_H
+
+#include <linux/stdarg.h>
+#include <linux/types.h>
+#include <linux/time.h>
+#include <linux/signal.h>
+#include <cobalt/kernel/thread.h>
+#include <cobalt/uapi/thread.h>
+#include <cobalt/uapi/sched.h>
+/* CAUTION: rtdm/cobalt.h reads this header. */
+#include <xenomai/posix/syscall.h>
+#include <xenomai/posix/extension.h>
+
+#define PTHREAD_PROCESS_PRIVATE 0
+#define PTHREAD_PROCESS_SHARED  1
+
+#define PTHREAD_CREATE_JOINABLE 0
+#define PTHREAD_CREATE_DETACHED 1
+
+#define PTHREAD_INHERIT_SCHED  0
+#define PTHREAD_EXPLICIT_SCHED 1
+
+#define PTHREAD_MUTEX_NORMAL     0
+#define PTHREAD_MUTEX_RECURSIVE  1
+#define PTHREAD_MUTEX_ERRORCHECK 2
+#define PTHREAD_MUTEX_DEFAULT    0
+
+struct cobalt_thread;
+struct cobalt_threadstat;
+
+/*
+ * pthread_mutexattr_t and pthread_condattr_t fit on 32 bits, for
+ * compatibility with libc.
+ */
+
+/* The following definitions are copied from LinuxThreads pthreadtypes.h. */
+struct _pthread_fastlock {
+	long int __status;
+	int __spinlock;
+};
+
+typedef struct {
+	struct _pthread_fastlock __c_lock;
+	long __c_waiting;
+	char __padding[48 - sizeof (struct _pthread_fastlock)
+		       - sizeof (long) - sizeof (long long)];
+	long long __align;
+} pthread_cond_t;
+
+enum {
+	PTHREAD_PRIO_NONE,
+	PTHREAD_PRIO_INHERIT,
+	PTHREAD_PRIO_PROTECT
+};
+
+typedef struct {
+	int __m_reserved;
+	int __m_count;
+	long __m_owner;
+	int __m_kind;
+	struct _pthread_fastlock __m_lock;
+} pthread_mutex_t;
+
+struct cobalt_local_hkey {
+	/** pthread_t from userland. */
+	unsigned long u_pth;
+	/** kernel mm context. */
+	struct mm_struct *mm;
+};
+
+struct cobalt_thread {
+	unsigned int magic;
+	struct xnthread threadbase;
+	struct cobalt_extref extref;
+	struct cobalt_process *process;
+	struct list_head next;	/* in global/process thread_list */
+
+	/** Signal management. */
+	sigset_t sigpending;
+	struct list_head sigqueues[_NSIG]; /* in cobalt_sigpending */
+	struct xnsynch sigwait;
+	struct list_head signext;
+
+	/** Monitor wait object and link holder. */
+	struct xnsynch monitor_synch;
+	struct list_head monitor_link;
+
+	struct cobalt_local_hkey hkey;
+};
+
+struct cobalt_sigwait_context {
+	struct xnthread_wait_context wc;
+	sigset_t *set;
+	struct siginfo *si;
+};
+
+static inline struct cobalt_thread *cobalt_current_thread(void)
+{
+	struct xnthread *curr = xnthread_current();
+	return curr ? container_of(curr, struct cobalt_thread, threadbase) : NULL;
+}
+
+int __cobalt_thread_create(unsigned long pth, int policy,
+			   struct sched_param_ex __user *u_param,
+			   int xid, __u32 __user *u_winoff);
+
+int __cobalt_thread_setschedparam_ex(struct cobalt_thread *thread, int policy,
+				     const struct sched_param_ex *param_ex);
+
+int cobalt_thread_setschedparam_ex(unsigned long pth,
+				   int policy,
+				   const struct sched_param_ex *param_ex,
+				   __u32 __user *u_winoff,
+				   int __user *u_promoted);
+
+int cobalt_thread_getschedparam_ex(unsigned long pth,
+				   int *policy_r,
+				   struct sched_param_ex *param_ex);
+
+int __cobalt_thread_getschedparam_ex(struct cobalt_thread *thread,
+				     int *policy_r,
+				     struct sched_param_ex *param_ex);
+
+int cobalt_thread_setschedprio(unsigned long pth,
+			       int prio,
+			       __u32 __user *u_winoff,
+			       int __user *u_promoted);
+
+struct cobalt_thread *cobalt_thread_find(pid_t pid);
+
+struct cobalt_thread *cobalt_thread_find_local(pid_t pid);
+
+struct cobalt_thread *cobalt_thread_lookup(unsigned long pth);
+
+COBALT_SYSCALL_DECL(thread_create,
+		    (unsigned long pth, int policy,
+		     struct sched_param_ex __user *u_param,
+		     int xid, __u32 __user *u_winoff));
+
+struct cobalt_thread *
+cobalt_thread_shadow(struct cobalt_local_hkey *lhkey,
+		     __u32 __user *u_winoff);
+
+COBALT_SYSCALL_DECL(thread_setmode,
+		    (int clrmask, int setmask, int __user *u_mode_r));
+
+COBALT_SYSCALL_DECL(thread_setname,
+		    (unsigned long pth, const char __user *u_name));
+
+COBALT_SYSCALL_DECL(thread_kill, (unsigned long pth, int sig));
+
+COBALT_SYSCALL_DECL(thread_join, (unsigned long pth));
+
+COBALT_SYSCALL_DECL(thread_getpid, (unsigned long pth));
+
+COBALT_SYSCALL_DECL(thread_getstat,
+		    (pid_t pid, struct cobalt_threadstat __user *u_stat));
+
+COBALT_SYSCALL_DECL(thread_setschedparam_ex,
+		    (unsigned long pth,
+		     int policy,
+		     const struct sched_param_ex __user *u_param,
+		     __u32 __user *u_winoff,
+		     int __user *u_promoted));
+
+COBALT_SYSCALL_DECL(thread_getschedparam_ex,
+		    (unsigned long pth,
+		     int __user *u_policy,
+		     struct sched_param_ex __user *u_param));
+
+COBALT_SYSCALL_DECL(thread_setschedprio,
+		    (unsigned long pth,
+		     int prio,
+		     __u32 __user *u_winoff,
+		     int __user *u_promoted));
+
+void cobalt_thread_map(struct xnthread *curr);
+
+struct xnthread_personality *cobalt_thread_exit(struct xnthread *curr);
+
+struct xnthread_personality *cobalt_thread_finalize(struct xnthread *zombie);
+
+#ifdef CONFIG_XENO_OPT_COBALT_EXTENSION
+
+int cobalt_thread_extend(struct cobalt_extension *ext,
+			 void *priv);
+
+void cobalt_thread_restrict(void);
+
+static inline
+int cobalt_thread_extended_p(const struct cobalt_thread *thread,
+			     const struct cobalt_extension *ext)
+{
+	return thread->extref.extension == ext;
+}
+
+#else /* !CONFIG_XENO_OPT_COBALT_EXTENSION */
+
+static inline
+int cobalt_thread_extended_p(const struct cobalt_thread *thread,
+			     const struct cobalt_extension *ext)
+{
+	return 0;
+}
+
+#endif /* !CONFIG_XENO_OPT_COBALT_EXTENSION */
+
+extern xnticks_t cobalt_time_slice;
+
+#endif /* !_COBALT_POSIX_THREAD_H */
+++ linux-patched/kernel/xenomai/posix/memory.h	2022-03-21 12:58:28.955893193 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/kernel/xenomai/posix/clock.h	1970-01-01 01:00:00.000000000 +0100
+/*
+ * This file is part of the Xenomai project.
+ *
+ * Copyright (C) 2014 Philippe Gerum <rpm@xenomai.org>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+#ifndef _COBALT_POSIX_MEMORY_H
+#define _COBALT_POSIX_MEMORY_H
+
+#include <cobalt/kernel/ppd.h>
+
+#define cobalt_umm_set_name(__umm, __fmt, __args...)	\
+	xnheap_set_name(&(__umm)->heap, (__fmt), ## __args)
+
+static inline
+void *cobalt_umm_alloc(struct cobalt_umm *umm, __u32 size)
+{
+	return xnheap_alloc(&umm->heap, size);
+}
+
+static inline
+void *cobalt_umm_zalloc(struct cobalt_umm *umm, __u32 size)
+{
+	return xnheap_zalloc(&umm->heap, size);
+}
+
+static inline
+void cobalt_umm_free(struct cobalt_umm *umm, void *p)
+{
+	xnheap_free(&umm->heap, p);
+}
+
+static inline
+__u32 cobalt_umm_offset(struct cobalt_umm *umm, void *p)
+{
+	return p - xnheap_get_membase(&umm->heap);
+}
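+
+/*
+ * Illustrative sketch, not part of the original sources: the offset
+ * computed above is relative to the UMM heap base, so it stays valid
+ * across address spaces. A shared object is typically allocated on
+ * the kernel side and located again in user-space by adding the
+ * offset to the locally mapped heap base:
+ *
+ *	// kernel side
+ *	state = cobalt_umm_zalloc(umm, sizeof(*state));
+ *	stateoff = cobalt_umm_offset(umm, state);
+ *
+ *	// user side, umm_base being a hypothetical pointer to the
+ *	// process mapping of the same heap
+ *	state = (struct cobalt_mutex_state *)
+ *		((char *)umm_base + stateoff);
+ */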
+
+int cobalt_memdev_init(void);
+
+void cobalt_memdev_cleanup(void);
+
+int cobalt_umm_init(struct cobalt_umm *umm, u32 size,
+		    void (*release)(struct cobalt_umm *umm));
+
+void cobalt_umm_destroy(struct cobalt_umm *umm);
+
+#endif /* !_COBALT_POSIX_MEMORY_H */
+++ linux-patched/kernel/xenomai/posix/clock.h	2022-03-21 12:58:28.951893232 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/kernel/xenomai/posix/timer.c	1970-01-01 01:00:00.000000000 +0100
+/*
+ * Written by Gilles Chanteperdrix <gilles.chanteperdrix@xenomai.org>.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+#ifndef _COBALT_POSIX_CLOCK_H
+#define _COBALT_POSIX_CLOCK_H
+
+#include <linux/types.h>
+#include <linux/time.h>
+#include <linux/cpumask.h>
+#include <cobalt/uapi/time.h>
+#include <xenomai/posix/syscall.h>
+
+#define ONE_BILLION             1000000000
+
+struct xnclock;
+
+static inline void ns2ts(struct timespec64 *ts, xnticks_t nsecs)
+{
+	ts->tv_sec = xnclock_divrem_billion(nsecs, &ts->tv_nsec);
+}
+
+static inline void u_ns2ts(struct __user_old_timespec *ts, xnticks_t nsecs)
+{
+	ts->tv_sec = xnclock_divrem_billion(nsecs, &ts->tv_nsec);
+}
+
+static inline xnticks_t ts2ns(const struct timespec64 *ts)
+{
+	xnticks_t nsecs = ts->tv_nsec;
+
+	if (ts->tv_sec)
+		nsecs += (xnticks_t)ts->tv_sec * ONE_BILLION;
+
+	return nsecs;
+}
+
+static inline xnticks_t u_ts2ns(const struct __user_old_timespec *ts)
+{
+	xnticks_t nsecs = ts->tv_nsec;
+
+	if (ts->tv_sec)
+		nsecs += (xnticks_t)ts->tv_sec * ONE_BILLION;
+
+	return nsecs;
+}
+
+static inline xnticks_t tv2ns(const struct __kernel_old_timeval *tv)
+{
+	xnticks_t nsecs = tv->tv_usec * 1000;
+
+	if (tv->tv_sec)
+		nsecs += (xnticks_t)tv->tv_sec * ONE_BILLION;
+
+	return nsecs;
+}
+
+static inline void ticks2tv(struct __kernel_old_timeval *tv, xnticks_t ticks)
+{
+	unsigned long nsecs;
+
+	tv->tv_sec = xnclock_divrem_billion(ticks, &nsecs);
+	tv->tv_usec = nsecs / 1000;
+}
+
+static inline xnticks_t clock_get_ticks(clockid_t clock_id)
+{
+	return clock_id == CLOCK_REALTIME ?
+		xnclock_read_realtime(&nkclock) :
+		xnclock_read_monotonic(&nkclock);
+}
+
+static inline int clock_flag(int flag, clockid_t clock_id)
+{
+	if ((flag & TIMER_ABSTIME) == 0)
+		return XN_RELATIVE;
+
+	if (clock_id == CLOCK_REALTIME)
+		return XN_REALTIME;
+
+	return XN_ABSOLUTE;
+}
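+
+/*
+ * Illustrative example, not part of the original sources, of how the
+ * conversion helpers and clock_flag() combine when translating a
+ * user timeout into core timer parameters:
+ *
+ *	struct timespec64 ts = { .tv_sec = 1, .tv_nsec = 500000000 };
+ *	xnticks_t ns = ts2ns(&ts);		// 1500000000 ns
+ *	int abs = clock_flag(TIMER_ABSTIME, CLOCK_REALTIME); // XN_REALTIME
+ *	int rel = clock_flag(0, CLOCK_MONOTONIC);	      // XN_RELATIVE
+ */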
+
+int __cobalt_clock_getres(clockid_t clock_id,
+			  struct timespec64 *ts);
+
+int __cobalt_clock_getres64(clockid_t clock_id,
+			struct __kernel_timespec __user *u_ts);
+
+int __cobalt_clock_gettime(clockid_t clock_id,
+			   struct timespec64 *ts);
+
+int __cobalt_clock_gettime64(clockid_t clock_id,
+			struct __kernel_timespec __user *u_ts);
+
+int __cobalt_clock_settime(clockid_t clock_id,
+			   const struct timespec64 *ts);
+
+int __cobalt_clock_settime64(clockid_t clock_id,
+			const struct __kernel_timespec __user *u_ts);
+
+int __cobalt_clock_adjtime(clockid_t clock_id,
+			   struct __kernel_timex *tx);
+
+int __cobalt_clock_adjtime64(clockid_t clock_id,
+			struct __kernel_timex __user *u_tx);
+
+int __cobalt_clock_nanosleep(clockid_t clock_id, int flags,
+			     const struct timespec64 *rqt,
+			     struct timespec64 *rmt);
+
+int __cobalt_clock_nanosleep64(clockid_t clock_id, int flags,
+		const struct __kernel_timespec __user *u_rqt,
+		struct __kernel_timespec __user *u_rmt);
+
+COBALT_SYSCALL_DECL(clock_getres,
+		    (clockid_t clock_id, struct __user_old_timespec __user *u_ts));
+
+COBALT_SYSCALL_DECL(clock_getres64,
+		    (clockid_t clock_id, struct __kernel_timespec __user *u_ts));
+
+COBALT_SYSCALL_DECL(clock_gettime,
+		    (clockid_t clock_id, struct __user_old_timespec __user *u_ts));
+
+COBALT_SYSCALL_DECL(clock_gettime64,
+		    (clockid_t clock_id, struct __kernel_timespec __user *u_ts));
+
+COBALT_SYSCALL_DECL(clock_settime,
+		    (clockid_t clock_id, const struct __user_old_timespec __user *u_ts));
+
+COBALT_SYSCALL_DECL(clock_settime64,
+		    (clockid_t clock_id,
+			 const struct __kernel_timespec __user *u_ts));
+
+COBALT_SYSCALL_DECL(clock_adjtime,
+		    (clockid_t clock_id, struct __user_old_timex __user *u_tx));
+
+COBALT_SYSCALL_DECL(clock_adjtime64,
+		    (clockid_t clock_id, struct __kernel_timex __user *u_tx));
+
+COBALT_SYSCALL_DECL(clock_nanosleep,
+		    (clockid_t clock_id, int flags,
+		     const struct __user_old_timespec __user *u_rqt,
+		     struct __user_old_timespec __user *u_rmt));
+
+COBALT_SYSCALL_DECL(clock_nanosleep64,
+		    (clockid_t clock_id, int flags,
+		     const struct __kernel_timespec __user *u_rqt,
+		     struct __kernel_timespec __user *u_rmt));
+
+int cobalt_clock_register(struct xnclock *clock,
+			  const cpumask_t *affinity,
+			  clockid_t *clk_id);
+
+void cobalt_clock_deregister(struct xnclock *clock);
+
+struct xnclock *cobalt_clock_find(clockid_t clock_id);
+
+extern DECLARE_BITMAP(cobalt_clock_extids, COBALT_MAX_EXTCLOCKS);
+
+#endif /* !_COBALT_POSIX_CLOCK_H */
+++ linux-patched/kernel/xenomai/posix/timer.c	2022-03-21 12:58:28.948893262 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/kernel/xenomai/sched-tp.c	1970-01-01 01:00:00.000000000 +0100
+/*
+ * Copyright (C) 2005 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+#include <linux/module.h>
+#include <linux/cred.h>
+#include <linux/err.h>
+#include "internal.h"
+#include "thread.h"
+#include "timer.h"
+#include "clock.h"
+#include "signal.h"
+
+void cobalt_timer_handler(struct xntimer *xntimer)
+{
+	struct cobalt_timer *timer;
+	/*
+	 * Deliver the timer notification via a signal (unless
+	 * SIGEV_NONE was given). If we can't do this because the
+	 * target thread disappeared, then stop the timer. It will go
+	 * away when timer_delete() is called, or the owner's process
+	 * exits, whichever comes first.
+	 */
+	timer = container_of(xntimer, struct cobalt_timer, timerbase);
+	if (timer->sigp.si.si_signo &&
+	    cobalt_signal_send_pid(timer->target, &timer->sigp) == -ESRCH)
+		xntimer_stop(&timer->timerbase);
+}
+EXPORT_SYMBOL_GPL(cobalt_timer_handler);
+
+static inline struct cobalt_thread *
+timer_init(struct cobalt_timer *timer,
+	   const struct sigevent *__restrict__ evp) /* nklocked, IRQs off. */
+{
+	struct cobalt_thread *owner = cobalt_current_thread(), *target = NULL;
+	struct xnclock *clock;
+
+	/*
+	 * First, try to offload this operation to the extended
+	 * personality the current thread might originate from.
+	 */
+	if (cobalt_initcall_extension(timer_init, &timer->extref,
+				      owner, target, evp) && target)
+		return target;
+
+	/*
+	 * Ok, we have no extension available, or we do but it does
+	 * not want to override the standard behavior: handle this
+	 * timer the pure Cobalt way then.
+	 */
+	if (evp == NULL || evp->sigev_notify == SIGEV_NONE) {
+		target = owner;	/* Assume SIGEV_THREAD_ID. */
+		goto init;
+	}
+
+	if (evp->sigev_notify != SIGEV_THREAD_ID)
+		return ERR_PTR(-EINVAL);
+
+	/*
+	 * Recipient thread must be a Xenomai shadow in user-space,
+	 * living in the same process as our caller.
+	 */
+	target = cobalt_thread_find_local(evp->sigev_notify_thread_id);
+	if (target == NULL)
+		return ERR_PTR(-EINVAL);
+init:
+	clock = cobalt_clock_find(timer->clockid);
+	if (IS_ERR(clock))
+		return ERR_PTR(PTR_ERR(clock));
+
+	xntimer_init(&timer->timerbase, clock, cobalt_timer_handler,
+		     target->threadbase.sched, XNTIMER_UGRAVITY);
+
+	return target;
+}
+
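+/*
+ * Note added for clarity: the per-process timers_map uses an
+ * inverted convention, i.e. a set bit denotes a free slot. Hence
+ * find_first_bit() returns the first available timer id,
+ * __clear_bit() marks it as busy, and timer_free_id() releases it by
+ * setting the bit again.
+ */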
+static inline int timer_alloc_id(struct cobalt_process *cc)
+{
+	int id;
+
+	id = find_first_bit(cc->timers_map, CONFIG_XENO_OPT_NRTIMERS);
+	if (id == CONFIG_XENO_OPT_NRTIMERS)
+		return -EAGAIN;
+
+	__clear_bit(id, cc->timers_map);
+
+	return id;
+}
+
+static inline void timer_free_id(struct cobalt_process *cc, int id)
+{
+	__set_bit(id, cc->timers_map);
+}
+
+struct cobalt_timer *
+cobalt_timer_by_id(struct cobalt_process *cc, timer_t timer_id)
+{
+	if (timer_id < 0 || timer_id >= CONFIG_XENO_OPT_NRTIMERS)
+		return NULL;
+
+	if (test_bit(timer_id, cc->timers_map))
+		return NULL;
+
+	return cc->timers[timer_id];
+}
+
+static inline int timer_create(clockid_t clockid,
+			       const struct sigevent *__restrict__ evp,
+			       timer_t * __restrict__ timerid)
+{
+	struct cobalt_process *cc;
+	struct cobalt_thread *target;
+	struct cobalt_timer *timer;
+	int signo, ret = -EINVAL;
+	timer_t timer_id;
+	spl_t s;
+
+	cc = cobalt_current_process();
+	if (cc == NULL)
+		return -EPERM;
+
+	timer = xnmalloc(sizeof(*timer));
+	if (timer == NULL)
+		return -ENOMEM;
+
+	timer->sigp.si.si_errno = 0;
+	timer->sigp.si.si_code = SI_TIMER;
+	timer->sigp.si.si_overrun = 0;
+	INIT_LIST_HEAD(&timer->sigp.next);
+	timer->clockid = clockid;
+	timer->overruns = 0;
+
+	xnlock_get_irqsave(&nklock, s);
+
+	ret = timer_alloc_id(cc);
+	if (ret < 0)
+		goto out;
+
+	timer_id = ret;
+
+	if (evp == NULL) {
+		timer->sigp.si.si_int = timer_id;
+		signo = SIGALRM;
+	} else {
+		if (evp->sigev_notify == SIGEV_NONE)
+			signo = 0; /* Don't notify. */
+		else {
+			signo = evp->sigev_signo;
+			if (signo < 1 || signo > _NSIG) {
+				ret = -EINVAL;
+				goto fail;
+			}
+			timer->sigp.si.si_value = evp->sigev_value;
+		}
+	}
+
+	timer->sigp.si.si_signo = signo;
+	timer->sigp.si.si_tid = timer_id;
+	timer->id = timer_id;
+
+	target = timer_init(timer, evp);
+	if (target == NULL) {
+		ret = -EPERM;
+		goto fail;
+	}
+
+	if (IS_ERR(target)) {
+		ret = PTR_ERR(target);
+		goto fail;
+	}
+
+	timer->target = xnthread_host_pid(&target->threadbase);
+	cc->timers[timer_id] = timer;
+
+	xnlock_put_irqrestore(&nklock, s);
+
+	*timerid = timer_id;
+
+	return 0;
+fail:
+	timer_free_id(cc, timer_id);
+out:
+	xnlock_put_irqrestore(&nklock, s);
+
+	xnfree(timer);
+
+	return ret;
+}
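+
+/*
+ * Illustrative user-space sketch, not part of the original sources,
+ * of a request reaching the SIGEV_THREAD_ID path handled above (the
+ * exact spelling of the thread id member depends on the C library):
+ *
+ *	struct sigevent sev = {
+ *		.sigev_notify = SIGEV_THREAD_ID,
+ *		.sigev_signo = SIGALRM,
+ *		.sigev_value.sival_int = 42,
+ *	};
+ *	sev.sigev_notify_thread_id = target_tid; // hypothetical tid
+ *	ret = timer_create(CLOCK_MONOTONIC, &sev, &timerid);
+ *
+ * Passing a NULL sigevent instead defaults to SIGALRM sent to the
+ * calling thread, with si_int carrying the timer id, as coded above.
+ */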
+
+static void timer_cleanup(struct cobalt_process *p, struct cobalt_timer *timer)
+{
+	xntimer_destroy(&timer->timerbase);
+
+	if (!list_empty(&timer->sigp.next))
+		list_del(&timer->sigp.next);
+
+	timer_free_id(p, cobalt_timer_id(timer));
+	p->timers[cobalt_timer_id(timer)] = NULL;
+}
+
+static inline int
+timer_delete(timer_t timerid)
+{
+	struct cobalt_process *cc;
+	struct cobalt_timer *timer;
+	int ret = 0;
+	spl_t s;
+
+	cc = cobalt_current_process();
+	if (cc == NULL)
+		return -EPERM;
+
+	xnlock_get_irqsave(&nklock, s);
+
+	timer = cobalt_timer_by_id(cc, timerid);
+	if (timer == NULL) {
+		xnlock_put_irqrestore(&nklock, s);
+		return -EINVAL;
+	}
+	/*
+	 * If an extension runs and actually handles the deletion, we
+	 * should not call the timer_cleanup extension handler for
+	 * this timer, but we shall destroy the core timer. If the
+	 * handler returns an error, the whole deletion process is
+	 * aborted, leaving the timer untouched. In all other cases,
+	 * we do the core timer cleanup work, firing the timer_cleanup
+	 * extension handler if defined.
+	 */
+	if (cobalt_call_extension(timer_delete, &timer->extref, ret) && ret < 0)
+		goto out;
+
+	if (ret == 0)
+		cobalt_call_extension(timer_cleanup, &timer->extref, ret);
+	else
+		ret = 0;
+
+	timer_cleanup(cc, timer);
+	xnlock_put_irqrestore(&nklock, s);
+	xnfree(timer);
+
+	return ret;
+
+out:
+	xnlock_put_irqrestore(&nklock, s);
+
+	return ret;
+}
+
+void __cobalt_timer_getval(struct xntimer *__restrict__ timer,
+			   struct itimerspec64 *__restrict__ value)
+{
+	ns2ts(&value->it_interval, xntimer_interval(timer));
+
+	if (!xntimer_running_p(timer)) {
+		value->it_value.tv_sec = 0;
+		value->it_value.tv_nsec = 0;
+	} else {
+		ns2ts(&value->it_value, xntimer_get_timeout(timer));
+	}
+}
+
+static inline void
+timer_gettimeout(struct cobalt_timer *__restrict__ timer,
+		 struct itimerspec64 *__restrict__ value)
+{
+	int ret = 0;
+
+	if (cobalt_call_extension(timer_gettime, &timer->extref,
+				  ret, value) && ret != 0)
+		return;
+
+	__cobalt_timer_getval(&timer->timerbase, value);
+}
+
+int __cobalt_timer_setval(struct xntimer *__restrict__ timer, int clock_flag,
+			  const struct itimerspec64 *__restrict__ value)
+{
+	xnticks_t start, period;
+
+	if (value->it_value.tv_nsec == 0 && value->it_value.tv_sec == 0) {
+		xntimer_stop(timer);
+		return 0;
+	}
+
+	if ((unsigned long)value->it_value.tv_nsec >= ONE_BILLION ||
+	    ((unsigned long)value->it_interval.tv_nsec >= ONE_BILLION &&
+	     (value->it_value.tv_sec != 0 || value->it_value.tv_nsec != 0)))
+		return -EINVAL;
+
+	start = ts2ns(&value->it_value) + 1;
+	period = ts2ns(&value->it_interval);
+
+	/*
+	 * Now start the timer. If the timeout date has already
+	 * passed, the caller will handle the case.
+	 */
+	return xntimer_start(timer, start, period, clock_flag);
+}
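+
+/*
+ * Illustrative example, not part of the original sources: arming a
+ * periodic timer which fires every millisecond after an initial
+ * 1 ms delay boils down to:
+ *
+ *	struct itimerspec64 v = {
+ *		.it_value    = { .tv_sec = 0, .tv_nsec = 1000000 },
+ *		.it_interval = { .tv_sec = 0, .tv_nsec = 1000000 },
+ *	};
+ *	ret = __cobalt_timer_setval(&timer->timerbase,
+ *				clock_flag(flags, timer->clockid), &v);
+ *
+ * A zero it_value stops the timer, per the early exit above.
+ */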
+
+static inline int timer_set(struct cobalt_timer *timer, int flags,
+			    const struct itimerspec64 *__restrict__ value)
+{				/* nklocked, IRQs off. */
+	struct cobalt_thread *thread;
+	int ret = 0;
+
+	/* First, try offloading the work to an extension. */
+
+	if (cobalt_call_extension(timer_settime, &timer->extref,
+				  ret, value, flags) && ret != 0)
+		return ret < 0 ? ret : 0;
+
+	/*
+	 * No extension, or operation not handled. Default to plain
+	 * POSIX behavior.
+	 *
+	 * If the target thread vanished, just don't start the timer.
+	 */
+	thread = cobalt_thread_find(timer->target);
+	if (thread == NULL)
+		return 0;
+
+	/*
+	 * Make the timer affine to the CPU running the thread to be
+	 * signaled if possible.
+	 */
+	xntimer_set_affinity(&timer->timerbase, thread->threadbase.sched);
+
+	return __cobalt_timer_setval(&timer->timerbase,
+				     clock_flag(flags, timer->clockid), value);
+}
+
+static inline void
+timer_deliver_late(struct cobalt_process *cc, timer_t timerid)
+{
+	struct cobalt_timer *timer;
+	spl_t s;
+
+	xnlock_get_irqsave(&nklock, s);
+	/*
+	 * We dropped the lock briefly; revalidate the timer handle in
+	 * case a deletion slipped in.
+	 */
+	timer = cobalt_timer_by_id(cc, timerid);
+	if (timer)
+		cobalt_timer_handler(&timer->timerbase);
+
+	xnlock_put_irqrestore(&nklock, s);
+}
+
+int __cobalt_timer_settime(timer_t timerid, int flags,
+			   const struct itimerspec64 *__restrict__ value,
+			   struct itimerspec64 *__restrict__ ovalue)
+{
+	struct cobalt_timer *timer;
+	struct cobalt_process *cc;
+	int ret;
+	spl_t s;
+
+	cc = cobalt_current_process();
+	XENO_BUG_ON(COBALT, cc == NULL);
+
+	xnlock_get_irqsave(&nklock, s);
+
+	timer = cobalt_timer_by_id(cc, timerid);
+	if (timer == NULL) {
+		ret = -EINVAL;
+		goto out;
+	}
+
+	if (ovalue)
+		timer_gettimeout(timer, ovalue);
+
+	ret = timer_set(timer, flags, value);
+	if (ret == -ETIMEDOUT) {
+		/*
+		 * Time has already passed, deliver a notification
+		 * immediately. Since we are about to dive into the
+		 * signal machinery for this, let's drop the nklock to
+		 * break the atomic section temporarily.
+		 */
+		xnlock_put_irqrestore(&nklock, s);
+		timer_deliver_late(cc, timerid);
+		return 0;
+	}
+out:
+	xnlock_put_irqrestore(&nklock, s);
+
+	return ret;
+}
+
+int __cobalt_timer_gettime(timer_t timerid, struct itimerspec64 *value)
+{
+	struct cobalt_timer *timer;
+	struct cobalt_process *cc;
+	spl_t s;
+
+	cc = cobalt_current_process();
+	if (cc == NULL)
+		return -EPERM;
+
+	xnlock_get_irqsave(&nklock, s);
+
+	timer = cobalt_timer_by_id(cc, timerid);
+	if (timer == NULL)
+		goto fail;
+
+	timer_gettimeout(timer, value);
+
+	xnlock_put_irqrestore(&nklock, s);
+
+	return 0;
+fail:
+	xnlock_put_irqrestore(&nklock, s);
+
+	return -EINVAL;
+}
+
+COBALT_SYSCALL(timer_delete, current, (timer_t timerid))
+{
+	return timer_delete(timerid);
+}
+
+int __cobalt_timer_create(clockid_t clock,
+			  const struct sigevent *sev,
+			  timer_t __user *u_tm)
+{
+	timer_t timerid = 0;
+	int ret;
+
+	ret = timer_create(clock, sev, &timerid);
+	if (ret)
+		return ret;
+
+	if (cobalt_copy_to_user(u_tm, &timerid, sizeof(timerid))) {
+		timer_delete(timerid);
+		return -EFAULT;
+	}
+
+	return 0;
+}
+
+COBALT_SYSCALL(timer_create, current,
+	       (clockid_t clock,
+		const struct sigevent __user *u_sev,
+		timer_t __user *u_tm))
+{
+	struct sigevent sev, *evp = NULL;
+
+	if (u_sev) {
+		evp = &sev;
+		if (cobalt_copy_from_user(&sev, u_sev, sizeof(sev)))
+			return -EFAULT;
+	}
+
+	return __cobalt_timer_create(clock, evp, u_tm);
+}
+
+COBALT_SYSCALL(timer_settime, primary,
+	       (timer_t tm, int flags,
+		const struct __user_old_itimerspec __user *u_newval,
+		struct __user_old_itimerspec __user *u_oldval))
+{
+	struct itimerspec64 newv, oldv, *oldvp = &oldv;
+	int ret;
+
+	if (u_oldval == NULL)
+		oldvp = NULL;
+
+	if (cobalt_get_u_itimerspec(&newv, u_newval))
+		return -EFAULT;
+
+	ret = __cobalt_timer_settime(tm, flags, &newv, oldvp);
+	if (ret)
+		return ret;
+
+	if (oldvp && cobalt_put_u_itimerspec(u_oldval, oldvp)) {
+		__cobalt_timer_settime(tm, flags, oldvp, NULL);
+		return -EFAULT;
+	}
+
+	return 0;
+}
+
+COBALT_SYSCALL(timer_gettime, current,
+	       (timer_t tm, struct __user_old_itimerspec __user *u_val))
+{
+	struct itimerspec64 val;
+	int ret;
+
+	ret = __cobalt_timer_gettime(tm, &val);
+	if (ret)
+		return ret;
+
+	return cobalt_put_u_itimerspec(u_val, &val);
+}
+
+COBALT_SYSCALL(timer_getoverrun, current, (timer_t timerid))
+{
+	struct cobalt_timer *timer;
+	struct cobalt_process *cc;
+	int overruns;
+	spl_t s;
+
+	cc = cobalt_current_process();
+	if (cc == NULL)
+		return -EPERM;
+
+	xnlock_get_irqsave(&nklock, s);
+
+	timer = cobalt_timer_by_id(cc, timerid);
+	if (timer == NULL)
+		goto fail;
+
+	overruns = timer->overruns;
+
+	xnlock_put_irqrestore(&nklock, s);
+
+	return overruns;
+fail:
+	xnlock_put_irqrestore(&nklock, s);
+
+	return -EINVAL;
+}
+
+int cobalt_timer_deliver(struct cobalt_thread *waiter, timer_t timerid) /* nklocked, IRQs off. */
+{
+	struct cobalt_timer *timer;
+	xnticks_t now;
+
+	timer = cobalt_timer_by_id(cobalt_current_process(), timerid);
+	if (timer == NULL)
+		/* Killed before ultimate delivery, who cares then? */
+		return 0;
+
+	if (!xntimer_periodic_p(&timer->timerbase))
+		timer->overruns = 0;
+	else {
+		now = xnclock_read_raw(xntimer_clock(&timer->timerbase));
+		timer->overruns = xntimer_get_overruns(&timer->timerbase,
+					       &waiter->threadbase, now);
+		if ((unsigned int)timer->overruns > COBALT_DELAYMAX)
+			timer->overruns = COBALT_DELAYMAX;
+	}
+
+	return timer->overruns;
+}
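+
+/*
+ * Note added for clarity: the overrun count computed here is what
+ * COBALT_SYSCALL(timer_getoverrun) below reports; clamping it to
+ * COBALT_DELAYMAX mirrors the DELAYTIMER_MAX cap POSIX defines for
+ * timer_getoverrun().
+ */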
+
+void cobalt_timer_reclaim(struct cobalt_process *p)
+{
+	struct cobalt_timer *timer;
+	unsigned id;
+	spl_t s;
+	int ret;
+
+	xnlock_get_irqsave(&nklock, s);
+
+	if (find_first_zero_bit(p->timers_map, CONFIG_XENO_OPT_NRTIMERS) ==
+		CONFIG_XENO_OPT_NRTIMERS)
+		goto out;
+
+	for (id = 0; id < ARRAY_SIZE(p->timers); id++) {
+		timer = cobalt_timer_by_id(p, id);
+		if (timer == NULL)
+			continue;
+
+		cobalt_call_extension(timer_cleanup, &timer->extref, ret);
+		timer_cleanup(p, timer);
+		xnlock_put_irqrestore(&nklock, s);
+		xnfree(timer);
+		xnlock_get_irqsave(&nklock, s);
+	}
+out:
+	xnlock_put_irqrestore(&nklock, s);
+}
+++ linux-patched/kernel/xenomai/sched-tp.c	2022-03-21 12:58:28.890893827 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/kernel/xenomai/sched-weak.c	1970-01-01 01:00:00.000000000 +0100
+/*
+ * Copyright (C) 2008 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * Xenomai is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published
+ * by the Free Software Foundation; either version 2 of the License,
+ * or (at your option) any later version.
+ *
+ * Xenomai is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Xenomai; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA
+ * 02111-1307, USA.
+ */
+#include <cobalt/kernel/sched.h>
+#include <cobalt/kernel/heap.h>
+#include <cobalt/uapi/sched.h>
+
+static void tp_schedule_next(struct xnsched_tp *tp)
+{
+	struct xnsched_tp_window *w;
+	struct xnsched *sched;
+	int p_next, ret;
+	xnticks_t t;
+
+	for (;;) {
+		/*
+		 * Switch to the next partition. Time holes in a
+		 * global time frame are defined as partition windows
+		 * assigned to part# -1, in which case the (always
+		 * empty) idle queue will be polled for runnable
+		 * threads.  Therefore, we may assume that a window
+		 * begins immediately after the previous one ends,
+		 * which simplifies the implementation a lot.
+		 */
+		w = &tp->gps->pwins[tp->wnext];
+		p_next = w->w_part;
+		tp->tps = p_next < 0 ? &tp->idle : &tp->partitions[p_next];
+
+		/* Schedule tick to advance to the next window. */
+		tp->wnext = (tp->wnext + 1) % tp->gps->pwin_nr;
+		w = &tp->gps->pwins[tp->wnext];
+		t = tp->tf_start + w->w_offset;
+
+		ret = xntimer_start(&tp->tf_timer, t, XN_INFINITE, XN_ABSOLUTE);
+		if (ret != -ETIMEDOUT)
+			break;
+		/*
+		 * We are late, make sure to remain within the bounds
+		 * of a valid time frame before advancing to the next
+		 * window. Otherwise, fix up by advancing to the next
+		 * time frame immediately.
+		 */
+		for (;;) {
+			t = tp->tf_start + tp->gps->tf_duration;
+			if (xnclock_read_monotonic(&nkclock) > t) {
+				tp->tf_start = t;
+				tp->wnext = 0;
+			} else
+				break;
+		}
+	}
+
+	sched = container_of(tp, struct xnsched, tp);
+	xnsched_set_resched(sched);
+}
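+
+/*
+ * Illustrative sketch, not part of the original sources: a global
+ * time frame is described by a struct xnsched_tp_schedule holding an
+ * array of consecutive windows, with part# -1 denoting an idle hole
+ * as explained above. Assuming the descriptor and its pwins[] array
+ * were allocated large enough by the caller, a 1 ms frame split into
+ * two 400 us partitions followed by a 200 us hole would read:
+ *
+ *	gps->pwin_nr = 3;
+ *	gps->tf_duration = 1000000;	// nanoseconds
+ *	gps->pwins[0].w_offset = 0;	 gps->pwins[0].w_part = 0;
+ *	gps->pwins[1].w_offset = 400000; gps->pwins[1].w_part = 1;
+ *	gps->pwins[2].w_offset = 800000; gps->pwins[2].w_part = -1;
+ *
+ *	old = xnsched_tp_set_schedule(sched, gps);
+ *	xnsched_tp_start_schedule(sched);
+ */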
+
+static void tp_tick_handler(struct xntimer *timer)
+{
+	struct xnsched_tp *tp = container_of(timer, struct xnsched_tp, tf_timer);
+	/*
+	 * Advance beginning date of time frame by a full period if we
+	 * are processing the last window.
+	 */
+	if (tp->wnext + 1 == tp->gps->pwin_nr)
+		tp->tf_start += tp->gps->tf_duration;
+
+	tp_schedule_next(tp);
+}
+
+static void xnsched_tp_init(struct xnsched *sched)
+{
+	struct xnsched_tp *tp = &sched->tp;
+	char timer_name[XNOBJECT_NAME_LEN];
+	int n;
+
+	for (n = 0; n < CONFIG_XENO_OPT_SCHED_TP_NRPART; n++)
+		xnsched_initq(&tp->partitions[n].runnable);
+
+	xnsched_initq(&tp->idle.runnable);
+
+#ifdef CONFIG_SMP
+	ksformat(timer_name, sizeof(timer_name), "[tp-tick/%u]", sched->cpu);
+#else
+	strcpy(timer_name, "[tp-tick]");
+#endif
+	tp->tps = NULL;
+	tp->gps = NULL;
+	INIT_LIST_HEAD(&tp->threads);
+	xntimer_init(&tp->tf_timer, &nkclock, tp_tick_handler,
+		     sched, XNTIMER_IGRAVITY);
+	xntimer_set_name(&tp->tf_timer, timer_name);
+}
+
+static bool xnsched_tp_setparam(struct xnthread *thread,
+				const union xnsched_policy_param *p)
+{
+	struct xnsched *sched = thread->sched;
+
+	thread->tps = &sched->tp.partitions[p->tp.ptid];
+	xnthread_clear_state(thread, XNWEAK);
+
+	return xnsched_set_effective_priority(thread, p->tp.prio);
+}
+
+static void xnsched_tp_getparam(struct xnthread *thread,
+				union xnsched_policy_param *p)
+{
+	p->tp.prio = thread->cprio;
+	p->tp.ptid = thread->tps - thread->sched->tp.partitions;
+}
+
+static void xnsched_tp_trackprio(struct xnthread *thread,
+				 const union xnsched_policy_param *p)
+{
+	/*
+	 * The assigned partition never changes internally due to PI
+	 * (see xnsched_track_policy), since this would be pretty
+	 * wrong with respect to TP scheduling: i.e. we may not allow
+	 * a thread from another partition to consume CPU time from
+	 * the current one, even though this would help enforce PI (see
+	 * note). In any case, introducing resource contention between
+	 * threads that belong to different partitions is utterly
+	 * wrong in the first place.  Only an explicit call to
+	 * xnsched_set_policy() may change the partition assigned to a
+	 * thread. For that reason, a policy reset action only boils
+	 * down to reinstating the base priority.
+	 *
+	 * NOTE: we do allow threads from lower scheduling classes to
+	 * consume CPU time from the current window as a result of a
+	 * PI boost, since this is aimed at speeding up the release of
+	 * a synchronization object a TP thread needs.
+	 */
+	if (p) {
+		/* We should never cross partition boundaries. */
+		XENO_WARN_ON(COBALT,
+			   thread->base_class == &xnsched_class_tp &&
+			   thread->tps - thread->sched->tp.partitions != p->tp.ptid);
+		thread->cprio = p->tp.prio;
+	} else
+		thread->cprio = thread->bprio;
+}
+
+static void xnsched_tp_protectprio(struct xnthread *thread, int prio)
+{
+	if (prio > XNSCHED_TP_MAX_PRIO)
+		prio = XNSCHED_TP_MAX_PRIO;
+
+	thread->cprio = prio;
+}
+
+static int xnsched_tp_chkparam(struct xnthread *thread,
+			       const union xnsched_policy_param *p)
+{
+	struct xnsched_tp *tp = &thread->sched->tp;
+
+	if (p->tp.ptid < 0 ||
+		p->tp.ptid >= CONFIG_XENO_OPT_SCHED_TP_NRPART)
+		return -EINVAL;
+
+	if (tp->gps == NULL ||
+	    p->tp.prio < XNSCHED_TP_MIN_PRIO ||
+	    p->tp.prio > XNSCHED_TP_MAX_PRIO)
+		return -EINVAL;
+
+	return 0;
+}
+
+static int xnsched_tp_declare(struct xnthread *thread,
+			      const union xnsched_policy_param *p)
+{
+	struct xnsched *sched = thread->sched;
+
+	list_add_tail(&thread->tp_link, &sched->tp.threads);
+
+	return 0;
+}
+
+static void xnsched_tp_forget(struct xnthread *thread)
+{
+	list_del(&thread->tp_link);
+	thread->tps = NULL;
+}
+
+static void xnsched_tp_enqueue(struct xnthread *thread)
+{
+	xnsched_addq_tail(&thread->tps->runnable, thread);
+}
+
+static void xnsched_tp_dequeue(struct xnthread *thread)
+{
+	xnsched_delq(&thread->tps->runnable, thread);
+}
+
+static void xnsched_tp_requeue(struct xnthread *thread)
+{
+	xnsched_addq(&thread->tps->runnable, thread);
+}
+
+static struct xnthread *xnsched_tp_pick(struct xnsched *sched)
+{
+	/* Never pick a thread if we don't schedule partitions. */
+	if (!xntimer_running_p(&sched->tp.tf_timer))
+		return NULL;
+
+	return xnsched_getq(&sched->tp.tps->runnable);
+}
+
+static void xnsched_tp_migrate(struct xnthread *thread, struct xnsched *sched)
+{
+	union xnsched_policy_param param;
+	/*
+	 * Since our partition schedule is a per-scheduler property,
+	 * it cannot apply to a thread that moves to another CPU
+	 * anymore. So we upgrade that thread to the RT class when a
+	 * CPU migration occurs. A subsequent call to
+	 * __xnthread_set_schedparam() may move it back to TP
+	 * scheduling, with a partition assignment that fits the
+	 * remote CPU's partition schedule.
+	 */
+	param.rt.prio = thread->cprio;
+	__xnthread_set_schedparam(thread, &xnsched_class_rt, &param);
+}
+
+void xnsched_tp_start_schedule(struct xnsched *sched)
+{
+	struct xnsched_tp *tp = &sched->tp;
+
+	if (tp->gps == NULL)
+		return;
+
+	tp->wnext = 0;
+	tp->tf_start = xnclock_read_monotonic(&nkclock);
+	tp_schedule_next(tp);
+}
+EXPORT_SYMBOL_GPL(xnsched_tp_start_schedule);
+
+void xnsched_tp_stop_schedule(struct xnsched *sched)
+{
+	struct xnsched_tp *tp = &sched->tp;
+
+	if (tp->gps)
+		xntimer_stop(&tp->tf_timer);
+}
+EXPORT_SYMBOL_GPL(xnsched_tp_stop_schedule);
+
+struct xnsched_tp_schedule *
+xnsched_tp_set_schedule(struct xnsched *sched,
+			struct xnsched_tp_schedule *gps)
+{
+	struct xnsched_tp_schedule *old_gps;
+	struct xnsched_tp *tp = &sched->tp;
+	union xnsched_policy_param param;
+	struct xnthread *thread, *tmp;
+
+	XENO_BUG_ON(COBALT, gps != NULL &&
+		   (gps->pwin_nr <= 0 || gps->pwins[0].w_offset != 0));
+
+	xnsched_tp_stop_schedule(sched);
+
+	/*
+	 * Move all TP threads on this scheduler to the RT class,
+	 * until we call __xnthread_set_schedparam() for them again.
+	 */
+	if (list_empty(&tp->threads))
+		goto done;
+
+	list_for_each_entry_safe(thread, tmp, &tp->threads, tp_link) {
+		param.rt.prio = thread->cprio;
+		__xnthread_set_schedparam(thread, &xnsched_class_rt, &param);
+	}
+done:
+	old_gps = tp->gps;
+	tp->gps = gps;
+
+	return old_gps;
+}
+EXPORT_SYMBOL_GPL(xnsched_tp_set_schedule);
+
+struct xnsched_tp_schedule *
+xnsched_tp_get_schedule(struct xnsched *sched)
+{
+	struct xnsched_tp_schedule *gps;
+
+	gps = sched->tp.gps;
+	if (gps == NULL)
+		return NULL;
+
+	atomic_inc(&gps->refcount);
+
+	return gps;
+}
+EXPORT_SYMBOL_GPL(xnsched_tp_get_schedule);
+
+void xnsched_tp_put_schedule(struct xnsched_tp_schedule *gps)
+{
+	if (atomic_dec_and_test(&gps->refcount))
+		xnfree(gps);
+}
+EXPORT_SYMBOL_GPL(xnsched_tp_put_schedule);
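+
+/*
+ * Usage note added for clarity: xnsched_tp_get_schedule() takes a
+ * reference on the current descriptor which the caller is expected
+ * to drop with xnsched_tp_put_schedule(); likewise, the previous
+ * descriptor returned by xnsched_tp_set_schedule() should eventually
+ * be released through xnsched_tp_put_schedule() once unused.
+ */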
+
+int xnsched_tp_get_partition(struct xnsched *sched)
+{
+	struct xnsched_tp *tp = &sched->tp;
+
+	if (tp->tps == NULL || tp->tps == &tp->idle)
+		return -1;
+
+	return tp->tps - tp->partitions;
+}
+EXPORT_SYMBOL_GPL(xnsched_tp_get_partition);
+
+#ifdef CONFIG_XENO_OPT_VFILE
+
+struct xnvfile_directory sched_tp_vfroot;
+
+struct vfile_sched_tp_priv {
+	struct xnthread *curr;
+};
+
+struct vfile_sched_tp_data {
+	int cpu;
+	pid_t pid;
+	char name[XNOBJECT_NAME_LEN];
+	int prio;
+	int ptid;
+};
+
+static struct xnvfile_snapshot_ops vfile_sched_tp_ops;
+
+static struct xnvfile_snapshot vfile_sched_tp = {
+	.privsz = sizeof(struct vfile_sched_tp_priv),
+	.datasz = sizeof(struct vfile_sched_tp_data),
+	.tag = &nkthreadlist_tag,
+	.ops = &vfile_sched_tp_ops,
+};
+
+static int vfile_sched_tp_rewind(struct xnvfile_snapshot_iterator *it)
+{
+	struct vfile_sched_tp_priv *priv = xnvfile_iterator_priv(it);
+	int nrthreads = xnsched_class_tp.nthreads;
+
+	if (nrthreads == 0)
+		return -ESRCH;
+
+	priv->curr = list_first_entry(&nkthreadq, struct xnthread, glink);
+
+	return nrthreads;
+}
+
+static int vfile_sched_tp_next(struct xnvfile_snapshot_iterator *it,
+			       void *data)
+{
+	struct vfile_sched_tp_priv *priv = xnvfile_iterator_priv(it);
+	struct vfile_sched_tp_data *p = data;
+	struct xnthread *thread;
+
+	if (priv->curr == NULL)
+		return 0;	/* All done. */
+
+	thread = priv->curr;
+	if (list_is_last(&thread->glink, &nkthreadq))
+		priv->curr = NULL;
+	else
+		priv->curr = list_next_entry(thread, glink);
+
+	if (thread->base_class != &xnsched_class_tp)
+		return VFILE_SEQ_SKIP;
+
+	p->cpu = xnsched_cpu(thread->sched);
+	p->pid = xnthread_host_pid(thread);
+	memcpy(p->name, thread->name, sizeof(p->name));
+	p->ptid = thread->tps - thread->sched->tp.partitions;
+	p->prio = thread->cprio;
+
+	return 1;
+}
+
+static int vfile_sched_tp_show(struct xnvfile_snapshot_iterator *it,
+			       void *data)
+{
+	struct vfile_sched_tp_data *p = data;
+
+	if (p == NULL)
+		xnvfile_printf(it, "%-3s  %-6s %-4s %-4s  %s\n",
+			       "CPU", "PID", "PTID", "PRI", "NAME");
+	else
+		xnvfile_printf(it, "%3u  %-6d %-4d %-4d  %s\n",
+			       p->cpu,
+			       p->pid,
+			       p->ptid,
+			       p->prio,
+			       p->name);
+
+	return 0;
+}
+
+static struct xnvfile_snapshot_ops vfile_sched_tp_ops = {
+	.rewind = vfile_sched_tp_rewind,
+	.next = vfile_sched_tp_next,
+	.show = vfile_sched_tp_show,
+};
+
+static int xnsched_tp_init_vfile(struct xnsched_class *schedclass,
+				 struct xnvfile_directory *vfroot)
+{
+	int ret;
+
+	ret = xnvfile_init_dir(schedclass->name, &sched_tp_vfroot, vfroot);
+	if (ret)
+		return ret;
+
+	return xnvfile_init_snapshot("threads", &vfile_sched_tp,
+				     &sched_tp_vfroot);
+}
+
+static void xnsched_tp_cleanup_vfile(struct xnsched_class *schedclass)
+{
+	xnvfile_destroy_snapshot(&vfile_sched_tp);
+	xnvfile_destroy_dir(&sched_tp_vfroot);
+}
+
+#endif /* CONFIG_XENO_OPT_VFILE */
+
+struct xnsched_class xnsched_class_tp = {
+	.sched_init		=	xnsched_tp_init,
+	.sched_enqueue		=	xnsched_tp_enqueue,
+	.sched_dequeue		=	xnsched_tp_dequeue,
+	.sched_requeue		=	xnsched_tp_requeue,
+	.sched_pick		=	xnsched_tp_pick,
+	.sched_tick		=	NULL,
+	.sched_rotate		=	NULL,
+	.sched_migrate		=	xnsched_tp_migrate,
+	.sched_chkparam		=	xnsched_tp_chkparam,
+	.sched_setparam		=	xnsched_tp_setparam,
+	.sched_getparam		=	xnsched_tp_getparam,
+	.sched_trackprio	=	xnsched_tp_trackprio,
+	.sched_protectprio	=	xnsched_tp_protectprio,
+	.sched_declare		=	xnsched_tp_declare,
+	.sched_forget		=	xnsched_tp_forget,
+	.sched_kick		=	NULL,
+#ifdef CONFIG_XENO_OPT_VFILE
+	.sched_init_vfile	=	xnsched_tp_init_vfile,
+	.sched_cleanup_vfile	=	xnsched_tp_cleanup_vfile,
+#endif
+	.weight			=	XNSCHED_CLASS_WEIGHT(2),
+	.policy			=	SCHED_TP,
+	.name			=	"tp"
+};
+EXPORT_SYMBOL_GPL(xnsched_class_tp);
+++ linux-patched/kernel/xenomai/sched-weak.c	2022-03-21 12:58:28.887893857 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/kernel/xenomai/procfs.c	1970-01-01 01:00:00.000000000 +0100
+/*
+ * Copyright (C) 2013 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * Xenomai is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published
+ * by the Free Software Foundation; either version 2 of the License,
+ * or (at your option) any later version.
+ *
+ * Xenomai is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Xenomai; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA
+ * 02111-1307, USA.
+ */
+#include <cobalt/kernel/sched.h>
+#include <cobalt/uapi/sched.h>
+
+static void xnsched_weak_init(struct xnsched *sched)
+{
+	xnsched_initq(&sched->weak.runnable);
+}
+
+static void xnsched_weak_requeue(struct xnthread *thread)
+{
+	xnsched_addq(&thread->sched->weak.runnable, thread);
+}
+
+static void xnsched_weak_enqueue(struct xnthread *thread)
+{
+	xnsched_addq_tail(&thread->sched->weak.runnable, thread);
+}
+
+static void xnsched_weak_dequeue(struct xnthread *thread)
+{
+	xnsched_delq(&thread->sched->weak.runnable, thread);
+}
+
+static struct xnthread *xnsched_weak_pick(struct xnsched *sched)
+{
+	return xnsched_getq(&sched->weak.runnable);
+}
+
+static bool xnsched_weak_setparam(struct xnthread *thread,
+				  const union xnsched_policy_param *p)
+{
+	if (!xnthread_test_state(thread, XNBOOST))
+		xnthread_set_state(thread, XNWEAK);
+
+	return xnsched_set_effective_priority(thread, p->weak.prio);
+}
+
+static void xnsched_weak_getparam(struct xnthread *thread,
+				  union xnsched_policy_param *p)
+{
+	p->weak.prio = thread->cprio;
+}
+
+static void xnsched_weak_trackprio(struct xnthread *thread,
+				   const union xnsched_policy_param *p)
+{
+	if (p)
+		thread->cprio = p->weak.prio;
+	else
+		thread->cprio = thread->bprio;
+}
+
+static void xnsched_weak_protectprio(struct xnthread *thread, int prio)
+{
+	if (prio > XNSCHED_WEAK_MAX_PRIO)
+		prio = XNSCHED_WEAK_MAX_PRIO;
+
+	thread->cprio = prio;
+}
+
+static int xnsched_weak_chkparam(struct xnthread *thread,
+				 const union xnsched_policy_param *p)
+{
+	if (p->weak.prio < XNSCHED_WEAK_MIN_PRIO ||
+	    p->weak.prio > XNSCHED_WEAK_MAX_PRIO)
+		return -EINVAL;
+
+	return 0;
+}
+
+#ifdef CONFIG_XENO_OPT_VFILE
+
+struct xnvfile_directory sched_weak_vfroot;
+
+struct vfile_sched_weak_priv {
+	struct xnthread *curr;
+};
+
+struct vfile_sched_weak_data {
+	int cpu;
+	pid_t pid;
+	char name[XNOBJECT_NAME_LEN];
+	int cprio;
+};
+
+static struct xnvfile_snapshot_ops vfile_sched_weak_ops;
+
+static struct xnvfile_snapshot vfile_sched_weak = {
+	.privsz = sizeof(struct vfile_sched_weak_priv),
+	.datasz = sizeof(struct vfile_sched_weak_data),
+	.tag = &nkthreadlist_tag,
+	.ops = &vfile_sched_weak_ops,
+};
+
+static int vfile_sched_weak_rewind(struct xnvfile_snapshot_iterator *it)
+{
+	struct vfile_sched_weak_priv *priv = xnvfile_iterator_priv(it);
+	int nrthreads = xnsched_class_weak.nthreads;
+
+	if (nrthreads == 0)
+		return -ESRCH;
+
+	priv->curr = list_first_entry(&nkthreadq, struct xnthread, glink);
+
+	return nrthreads;
+}
+
+static int vfile_sched_weak_next(struct xnvfile_snapshot_iterator *it,
+				 void *data)
+{
+	struct vfile_sched_weak_priv *priv = xnvfile_iterator_priv(it);
+	struct vfile_sched_weak_data *p = data;
+	struct xnthread *thread;
+
+	if (priv->curr == NULL)
+		return 0;	/* All done. */
+
+	thread = priv->curr;
+	if (list_is_last(&thread->glink, &nkthreadq))
+		priv->curr = NULL;
+	else
+		priv->curr = list_next_entry(thread, glink);
+
+	if (thread->base_class != &xnsched_class_weak)
+		return VFILE_SEQ_SKIP;
+
+	p->cpu = xnsched_cpu(thread->sched);
+	p->pid = xnthread_host_pid(thread);
+	memcpy(p->name, thread->name, sizeof(p->name));
+	p->cprio = thread->cprio;
+
+	return 1;
+}
+
+static int vfile_sched_weak_show(struct xnvfile_snapshot_iterator *it,
+				 void *data)
+{
+	struct vfile_sched_weak_data *p = data;
+	char pribuf[16];
+
+	if (p == NULL)
+		xnvfile_printf(it, "%-3s  %-6s %-4s %s\n",
+			       "CPU", "PID", "PRI", "NAME");
+	else {
+		ksformat(pribuf, sizeof(pribuf), "%3d", p->cprio);
+		xnvfile_printf(it, "%3u  %-6d %-4s %s\n",
+			       p->cpu,
+			       p->pid,
+			       pribuf,
+			       p->name);
+	}
+
+	return 0;
+}
+
+static struct xnvfile_snapshot_ops vfile_sched_weak_ops = {
+	.rewind = vfile_sched_weak_rewind,
+	.next = vfile_sched_weak_next,
+	.show = vfile_sched_weak_show,
+};
+
+static int xnsched_weak_init_vfile(struct xnsched_class *schedclass,
+				   struct xnvfile_directory *vfroot)
+{
+	int ret;
+
+	ret = xnvfile_init_dir(schedclass->name, &sched_weak_vfroot, vfroot);
+	if (ret)
+		return ret;
+
+	return xnvfile_init_snapshot("threads", &vfile_sched_weak,
+				     &sched_weak_vfroot);
+}
+
+static void xnsched_weak_cleanup_vfile(struct xnsched_class *schedclass)
+{
+	xnvfile_destroy_snapshot(&vfile_sched_weak);
+	xnvfile_destroy_dir(&sched_weak_vfroot);
+}
+
+#endif /* CONFIG_XENO_OPT_VFILE */
+
+struct xnsched_class xnsched_class_weak = {
+	.sched_init		=	xnsched_weak_init,
+	.sched_enqueue		=	xnsched_weak_enqueue,
+	.sched_dequeue		=	xnsched_weak_dequeue,
+	.sched_requeue		=	xnsched_weak_requeue,
+	.sched_pick		=	xnsched_weak_pick,
+	.sched_tick		=	NULL,
+	.sched_rotate		=	NULL,
+	.sched_forget		=	NULL,
+	.sched_kick		=	NULL,
+	.sched_chkparam		=	xnsched_weak_chkparam,
+	.sched_setparam		=	xnsched_weak_setparam,
+	.sched_trackprio	=	xnsched_weak_trackprio,
+	.sched_protectprio	=	xnsched_weak_protectprio,
+	.sched_getparam		=	xnsched_weak_getparam,
+#ifdef CONFIG_XENO_OPT_VFILE
+	.sched_init_vfile	=	xnsched_weak_init_vfile,
+	.sched_cleanup_vfile	=	xnsched_weak_cleanup_vfile,
+#endif
+	.weight			=	XNSCHED_CLASS_WEIGHT(1),
+	.policy			=	SCHED_WEAK,
+	.name			=	"weak"
+};
+EXPORT_SYMBOL_GPL(xnsched_class_weak);
+++ linux-patched/kernel/xenomai/procfs.c	2022-03-21 12:58:28.883893896 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/kernel/xenomai/sched-sporadic.c	1970-01-01 01:00:00.000000000 +0100
+/*
+ * Copyright (C) 2001-2013 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * Xenomai is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published
+ * by the Free Software Foundation; either version 2 of the License,
+ * or (at your option) any later version.
+ *
+ * Xenomai is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Xenomai; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA
+ * 02111-1307, USA.
+ */
+#include <cobalt/kernel/lock.h>
+#include <cobalt/kernel/clock.h>
+#include <cobalt/kernel/vfile.h>
+#include <cobalt/kernel/intr.h>
+#include <cobalt/kernel/heap.h>
+#include <cobalt/kernel/timer.h>
+#include <cobalt/kernel/sched.h>
+#include <xenomai/version.h>
+#include "debug.h"
+
+#ifdef CONFIG_XENO_OPT_DEBUG_LOCKING
+
+static int lock_vfile_show(struct xnvfile_regular_iterator *it, void *data)
+{
+	struct xnlockinfo lockinfo;
+	spl_t s;
+	int cpu;
+
+	for_each_realtime_cpu(cpu) {
+		xnlock_get_irqsave(&nklock, s);
+		lockinfo = per_cpu(xnlock_stats, cpu);
+		xnlock_put_irqrestore(&nklock, s);
+
+		if (cpu > 0)
+			xnvfile_printf(it, "\n");
+
+		xnvfile_printf(it, "CPU%d:\n", cpu);
+
+		xnvfile_printf(it,
+			     "  longest locked section: %llu ns\n"
+			     "  spinning time: %llu ns\n"
+			     "  section entry: %s:%d (%s)\n",
+			       xnclock_ticks_to_ns(&nkclock, lockinfo.lock_time),
+			       xnclock_ticks_to_ns(&nkclock, lockinfo.spin_time),
+			       lockinfo.file, lockinfo.line, lockinfo.function);
+	}
+
+	return 0;
+}
+
+static ssize_t lock_vfile_store(struct xnvfile_input *input)
+{
+	ssize_t ret;
+	spl_t s;
+	int cpu;
+
+	long val;
+
+	ret = xnvfile_get_integer(input, &val);
+	if (ret < 0)
+		return ret;
+
+	if (val != 0)
+		return -EINVAL;
+
+	for_each_realtime_cpu(cpu) {
+		xnlock_get_irqsave(&nklock, s);
+		memset(&per_cpu(xnlock_stats, cpu), '\0', sizeof(struct xnlockinfo));
+		xnlock_put_irqrestore(&nklock, s);
+	}
+
+	return ret;
+}
+
+static struct xnvfile_regular_ops lock_vfile_ops = {
+	.show = lock_vfile_show,
+	.store = lock_vfile_store,
+};
+
+static struct xnvfile_regular lock_vfile = {
+	.ops = &lock_vfile_ops,
+};
+
+#endif /* CONFIG_XENO_OPT_DEBUG_LOCKING */
+
+static int latency_vfile_show(struct xnvfile_regular_iterator *it, void *data)
+{
+	xnvfile_printf(it, "%Lu\n",
+		       xnclock_ticks_to_ns(&nkclock, nkclock.gravity.user));
+
+	return 0;
+}
+
+static ssize_t latency_vfile_store(struct xnvfile_input *input)
+{
+	ssize_t ret;
+	long val;
+
+	ret = xnvfile_get_integer(input, &val);
+	if (ret < 0)
+		return ret;
+
+	nkclock.gravity.user = xnclock_ns_to_ticks(&nkclock, val);
+
+	return ret;
+}
+
+static struct xnvfile_regular_ops latency_vfile_ops = {
+	.show = latency_vfile_show,
+	.store = latency_vfile_store,
+};
+
+static struct xnvfile_regular latency_vfile = {
+	.ops = &latency_vfile_ops,
+};
+
+static int version_vfile_show(struct xnvfile_regular_iterator *it, void *data)
+{
+	xnvfile_printf(it, "%s\n", XENO_VERSION_STRING);
+
+	return 0;
+}
+
+static struct xnvfile_regular_ops version_vfile_ops = {
+	.show = version_vfile_show,
+};
+
+static struct xnvfile_regular version_vfile = {
+	.ops = &version_vfile_ops,
+};
+
+static int faults_vfile_show(struct xnvfile_regular_iterator *it, void *data)
+{
+	int cpu, trap;
+
+	xnvfile_puts(it, "TRAP ");
+
+	for_each_realtime_cpu(cpu)
+		xnvfile_printf(it, "        CPU%d", cpu);
+
+	for (trap = 0; cobalt_machine.fault_labels[trap]; trap++) {
+		if (*cobalt_machine.fault_labels[trap] == '\0')
+			continue;
+
+		xnvfile_printf(it, "\n%3d: ", trap);
+
+		for_each_realtime_cpu(cpu)
+			xnvfile_printf(it, "%12u",
+				       per_cpu(cobalt_machine_cpudata, cpu).faults[trap]);
+
+		xnvfile_printf(it, "    (%s)",
+			       cobalt_machine.fault_labels[trap]);
+	}
+
+	xnvfile_putc(it, '\n');
+
+	return 0;
+}
+
+static struct xnvfile_regular_ops faults_vfile_ops = {
+	.show = faults_vfile_show,
+};
+
+static struct xnvfile_regular faults_vfile = {
+	.ops = &faults_vfile_ops,
+};
+
+void xnprocfs_cleanup_tree(void)
+{
+#ifdef CONFIG_XENO_OPT_DEBUG
+#ifdef CONFIG_XENO_OPT_DEBUG_LOCKING
+	xnvfile_destroy_regular(&lock_vfile);
+#endif
+	xnvfile_destroy_dir(&cobalt_debug_vfroot);
+#endif /* CONFIG_XENO_OPT_DEBUG */
+	xnvfile_destroy_regular(&faults_vfile);
+	xnvfile_destroy_regular(&version_vfile);
+	xnvfile_destroy_regular(&latency_vfile);
+	xnintr_cleanup_proc();
+	xnheap_cleanup_proc();
+	xnclock_cleanup_proc();
+	xnsched_cleanup_proc();
+	xnvfile_destroy_root();
+}
+
+int __init xnprocfs_init_tree(void)
+{
+	int ret;
+
+	ret = xnvfile_init_root();
+	if (ret)
+		return ret;
+
+	ret = xnsched_init_proc();
+	if (ret)
+		return ret;
+
+	xnclock_init_proc();
+	xnheap_init_proc();
+	xnintr_init_proc();
+	xnvfile_init_regular("latency", &latency_vfile, &cobalt_vfroot);
+	xnvfile_init_regular("version", &version_vfile, &cobalt_vfroot);
+	xnvfile_init_regular("faults", &faults_vfile, &cobalt_vfroot);
+#ifdef CONFIG_XENO_OPT_DEBUG
+	xnvfile_init_dir("debug", &cobalt_debug_vfroot, &cobalt_vfroot);
+#ifdef CONFIG_XENO_OPT_DEBUG_LOCKING
+	xnvfile_init_regular("lock", &lock_vfile, &cobalt_debug_vfroot);
+#endif
+#endif
+
+	return 0;
+}
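+
+/*
+ * For reference (a sketch, not part of the original code): with all
+ * options enabled, the registrations above populate /proc/xenomai
+ * with at least the following entries; additional nodes come from
+ * xnsched_init_proc(), xnclock_init_proc(), xnheap_init_proc() and
+ * xnintr_init_proc():
+ *
+ *	/proc/xenomai/latency
+ *	/proc/xenomai/version
+ *	/proc/xenomai/faults
+ *	/proc/xenomai/debug/lock	(CONFIG_XENO_OPT_DEBUG_LOCKING)
+ */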
+++ linux-patched/kernel/xenomai/sched-sporadic.c	2022-03-21 12:58:28.879893935 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/kernel/xenomai/vfile.c	1970-01-01 01:00:00.000000000 +0100
+/*
+ * Copyright (C) 2009 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * Xenomai is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published
+ * by the Free Software Foundation; either version 2 of the License,
+ * or (at your option) any later version.
+ *
+ * Xenomai is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Xenomai; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA
+ * 02111-1307, USA.
+ */
+#include <cobalt/kernel/sched.h>
+#include <cobalt/kernel/heap.h>
+#include <cobalt/uapi/sched.h>
+
+#define MAX_REPLENISH CONFIG_XENO_OPT_SCHED_SPORADIC_MAXREPL
+
+static void sporadic_post_recharge(struct xnthread *thread, xnticks_t budget);
+
+#ifdef CONFIG_XENO_OPT_DEBUG_COBALT
+
+static inline void sporadic_note_late_drop(struct xnsched *sched)
+{
+	/*
+	 * This code should pull the brake when a misconfigured
+	 * sporadic thread is late on its drop date more than a
+	 * hundred times in a row. This normally reveals a time budget
+	 * which is too tight.
+	 */
+	XENO_BUG_ON(COBALT, ++sched->pss.drop_retries > 100);
+}
+
+static inline void sporadic_note_valid_drop(struct xnsched *sched)
+{
+	sched->pss.drop_retries = 0;
+}
+
+#else /* !CONFIG_XENO_OPT_DEBUG_COBALT */
+
+static inline void sporadic_note_late_drop(struct xnsched *sched)
+{
+}
+
+static inline void sporadic_note_valid_drop(struct xnsched *sched)
+{
+}
+
+#endif /* !CONFIG_XENO_OPT_DEBUG_COBALT */
+
+static inline xnticks_t sporadic_diff_time(xnticks_t start, xnticks_t end)
+{
+	xnsticks_t d = (xnsticks_t)(end - start);
+	return unlikely(d < 0) ? -d : d;
+}
+
+static void sporadic_drop_handler(struct xntimer *timer)
+{
+	struct xnsched_sporadic_data *pss;
+	union xnsched_policy_param p;
+	struct xnthread *thread;
+
+	/*
+	 * XXX: this code will work properly regardless of
+	 * primary/secondary mode issues.
+	 */
+	pss = container_of(timer, struct xnsched_sporadic_data, drop_timer);
+	thread = pss->thread;
+
+	sporadic_post_recharge(thread, pss->budget);
+
+	if (pss->budget == 0 && thread->cprio > pss->param.low_prio) {
+		if (pss->param.low_prio < 0)
+			/*
+			 * Special case: low_prio == -1, we want the
+			 * thread to suspend until a replenishment
+			 * happens.
+			 */
+			xnthread_suspend(thread, XNHELD,
+					 XN_INFINITE, XN_RELATIVE, NULL);
+		else {
+			p.pss.init_budget = 0;
+			p.pss.current_prio = pss->param.low_prio;
+			/* Move sporadic thread to the background. */
+			__xnthread_set_schedparam(thread, &xnsched_class_sporadic, &p);
+		}
+	}
+}
+
+static void sporadic_schedule_drop(struct xnthread *thread)
+{
+	xnticks_t now = xnclock_read_monotonic(&nkclock);
+	struct xnsched_sporadic_data *pss = thread->pss;
+	int ret;
+
+	pss->resume_date = now;
+	/*
+	 * Assuming this timer should not fire that often unless the
+	 * monitored thread behaves badly, we don't pin it on the CPU
+	 * the thread is running on, trading cycles at firing time
+	 * against cycles when arming the timer.
+	 */
+	xntimer_set_affinity(&pss->drop_timer, thread->sched);
+	ret = xntimer_start(&pss->drop_timer, now + pss->budget,
+			    XN_INFINITE, XN_ABSOLUTE);
+	if (ret == -ETIMEDOUT) {
+		sporadic_note_late_drop(thread->sched);
+		sporadic_drop_handler(&pss->drop_timer);
+	} else
+		sporadic_note_valid_drop(thread->sched);
+}
+
+static void sporadic_replenish_handler(struct xntimer *timer)
+{
+	struct xnsched_sporadic_data *pss;
+	union xnsched_policy_param p;
+	struct xnthread *thread;
+	xnticks_t now;
+	int r, ret;
+
+	pss = container_of(timer, struct xnsched_sporadic_data, repl_timer);
+	thread = pss->thread;
+	XENO_BUG_ON(COBALT, pss->repl_pending <= 0);
+
+retry:
+	now = xnclock_read_monotonic(&nkclock);
+
+	do {
+		r = pss->repl_out;
+		if ((xnsticks_t)(now - pss->repl_data[r].date) <= 0)
+			break;
+		pss->budget += pss->repl_data[r].amount;
+		if (pss->budget > pss->param.init_budget)
+			pss->budget = pss->param.init_budget;
+		pss->repl_out = (r + 1) % MAX_REPLENISH;
+	} while (--pss->repl_pending > 0);
+
+	if (pss->repl_pending > 0) {
+		xntimer_set_affinity(&pss->repl_timer, thread->sched);
+		ret = xntimer_start(&pss->repl_timer, pss->repl_data[r].date,
+				    XN_INFINITE, XN_ABSOLUTE);
+		if (ret == -ETIMEDOUT)
+			goto retry; /* This plugs a tiny race. */
+	}
+
+	if (pss->budget == 0)
+		return;
+
+	if (xnthread_test_state(thread, XNHELD))
+		xnthread_resume(thread, XNHELD);
+	else if (thread->cprio < pss->param.normal_prio) {
+		p.pss.init_budget = 0;
+		p.pss.current_prio = pss->param.normal_prio;
+		/* Move sporadic thread to the foreground. */
+		__xnthread_set_schedparam(thread, &xnsched_class_sporadic, &p);
+	}
+
+	/*
+	 * XXX: we have to reset the drop timer in case we preempted
+	 * the thread which just got a budget increase.
+	 */
+	if (thread->sched->curr == thread)
+		sporadic_schedule_drop(thread);
+}
+
+static void sporadic_post_recharge(struct xnthread *thread, xnticks_t budget)
+{
+	struct xnsched_sporadic_data *pss = thread->pss;
+	int r, ret;
+
+	if (pss->repl_pending >= pss->param.max_repl)
+		return;
+
+	if (budget > pss->budget) {
+		budget = pss->budget;
+		pss->budget = 0;
+	} else
+		pss->budget -= budget;
+
+	r = pss->repl_in;
+	pss->repl_data[r].date = pss->resume_date + pss->param.repl_period;
+	pss->repl_data[r].amount = budget;
+	pss->repl_in = (r + 1) % MAX_REPLENISH;
+
+	if (pss->repl_pending++ == 0) {
+		xntimer_set_affinity(&pss->repl_timer, thread->sched);
+		ret = xntimer_start(&pss->repl_timer, pss->repl_data[r].date,
+				    XN_INFINITE, XN_ABSOLUTE);
+		/*
+		 * The following case should not happen unless the
+		 * initial budget value is inappropriate, but let's
+		 * handle it anyway.
+		 */
+		if (ret == -ETIMEDOUT)
+			sporadic_replenish_handler(&pss->repl_timer);
+	}
+}
+
+static void sporadic_suspend_activity(struct xnthread *thread)
+{
+	struct xnsched_sporadic_data *pss = thread->pss;
+	xnticks_t budget, now;
+
+	if (pss->budget > 0) {
+		xntimer_stop(&pss->drop_timer);
+		now = xnclock_read_monotonic(&nkclock);
+		budget = sporadic_diff_time(now, pss->resume_date);
+		sporadic_post_recharge(thread, budget);
+	}
+}
+
+static inline void sporadic_resume_activity(struct xnthread *thread)
+{
+	if (thread->pss->budget > 0)
+		sporadic_schedule_drop(thread);
+}
+
+static void xnsched_sporadic_init(struct xnsched *sched)
+{
+	/*
+	 * We literally stack the sporadic scheduler on top of the RT
+	 * one, reusing its run queue directly. This way, RT and
+	 * sporadic threads are merged into the same runqueue and thus
+	 * share the same priority scale, with the addition of budget
+	 * management for the sporadic ones.
+	 */
+#ifdef CONFIG_XENO_OPT_DEBUG_COBALT
+	sched->pss.drop_retries = 0;
+#endif
+}
+
+static bool xnsched_sporadic_setparam(struct xnthread *thread,
+				      const union xnsched_policy_param *p)
+{
+	struct xnsched_sporadic_data *pss = thread->pss;
+	bool effective;
+
+	xnthread_clear_state(thread, XNWEAK);
+	effective = xnsched_set_effective_priority(thread, p->pss.current_prio);
+
+	/*
+	 * We use the budget information to determine whether we got
+	 * here from one of our internal calls to
+	 * xnthread_set_schedparam(), in which case we don't want to
+	 * update the scheduling parameters, but only set the
+	 * effective priority.
+	 */
+	if (p->pss.init_budget > 0) {
+		pss->param = p->pss;
+		pss->budget = p->pss.init_budget;
+		pss->repl_in = 0;
+		pss->repl_out = 0;
+		pss->repl_pending = 0;
+		if (effective && thread == thread->sched->curr) {
+			xntimer_stop(&pss->drop_timer);
+			sporadic_schedule_drop(thread);
+		}
+	}
+
+	return effective;
+}
+
+static void xnsched_sporadic_getparam(struct xnthread *thread,
+				      union xnsched_policy_param *p)
+{
+	p->pss = thread->pss->param;
+	p->pss.current_prio = thread->cprio;
+}
+
+static void xnsched_sporadic_trackprio(struct xnthread *thread,
+				       const union xnsched_policy_param *p)
+{
+	if (p)
+		thread->cprio = p->pss.current_prio;
+	else
+		thread->cprio = thread->bprio;
+}
+
+static void xnsched_sporadic_protectprio(struct xnthread *thread, int prio)
+{
+	if (prio > XNSCHED_SPORADIC_MAX_PRIO)
+		prio = XNSCHED_SPORADIC_MAX_PRIO;
+
+	thread->cprio = prio;
+}
+
+static int xnsched_sporadic_chkparam(struct xnthread *thread,
+				     const union xnsched_policy_param *p)
+{
+	if (p->pss.low_prio != -1 &&
+	    (p->pss.low_prio < XNSCHED_SPORADIC_MIN_PRIO ||
+	     p->pss.low_prio > XNSCHED_SPORADIC_MAX_PRIO))
+		return -EINVAL;
+
+	if (p->pss.normal_prio < XNSCHED_SPORADIC_MIN_PRIO ||
+	    p->pss.normal_prio > XNSCHED_SPORADIC_MAX_PRIO)
+		return -EINVAL;
+
+	if (p->pss.init_budget == 0)
+		return -EINVAL;
+
+	if (p->pss.current_prio != p->pss.normal_prio)
+		return -EINVAL;
+
+	if (p->pss.repl_period < p->pss.init_budget)
+		return -EINVAL;
+
+	if (p->pss.normal_prio <= p->pss.low_prio)
+		return -EINVAL;
+
+	if (p->pss.max_repl < 1 || p->pss.max_repl > MAX_REPLENISH)
+		return -EINVAL;
+
+	return 0;
+}
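+
+/*
+ * Illustration only (not part of the original code): a parameter set
+ * passing the checks above could look as follows, assuming both
+ * priorities fall within the sporadic class range and times are
+ * given in core clock ticks (nanoseconds with the default core
+ * clock):
+ *
+ *	p->pss.normal_prio  = 30;	(must equal current_prio)
+ *	p->pss.current_prio = 30;
+ *	p->pss.low_prio     = 10;	(or -1 to suspend on depletion)
+ *	p->pss.init_budget  = 500000;	(500 us of execution budget)
+ *	p->pss.repl_period  = 1000000;	(1 ms, >= init_budget)
+ *	p->pss.max_repl     = 4;	(within 1..MAX_REPLENISH)
+ */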
+
+static int xnsched_sporadic_declare(struct xnthread *thread,
+				    const union xnsched_policy_param *p)
+{
+	struct xnsched_sporadic_data *pss;
+
+	pss = xnmalloc(sizeof(*pss));
+	if (pss == NULL)
+		return -ENOMEM;
+
+	xntimer_init(&pss->repl_timer, &nkclock, sporadic_replenish_handler,
+		     thread->sched, XNTIMER_IGRAVITY);
+	xntimer_set_name(&pss->repl_timer, "pss-replenish");
+	xntimer_init(&pss->drop_timer, &nkclock, sporadic_drop_handler,
+		     thread->sched, XNTIMER_IGRAVITY);
+	xntimer_set_name(&pss->drop_timer, "pss-drop");
+
+	thread->pss = pss;
+	pss->thread = thread;
+
+	return 0;
+}
+
+static void xnsched_sporadic_forget(struct xnthread *thread)
+{
+	struct xnsched_sporadic_data *pss = thread->pss;
+
+	xntimer_destroy(&pss->repl_timer);
+	xntimer_destroy(&pss->drop_timer);
+	xnfree(pss);
+	thread->pss = NULL;
+}
+
+static void xnsched_sporadic_enqueue(struct xnthread *thread)
+{
+	__xnsched_rt_enqueue(thread);
+}
+
+static void xnsched_sporadic_dequeue(struct xnthread *thread)
+{
+	__xnsched_rt_dequeue(thread);
+}
+
+static void xnsched_sporadic_requeue(struct xnthread *thread)
+{
+	__xnsched_rt_requeue(thread);
+}
+
+static struct xnthread *xnsched_sporadic_pick(struct xnsched *sched)
+{
+	struct xnthread *curr = sched->curr, *next;
+
+	next = xnsched_getq(&sched->rt.runnable);
+	if (next == NULL)
+		goto swap;
+
+	if (curr == next)
+		return next;
+
+	/* Arm the drop timer for an incoming sporadic thread. */
+	if (next->pss)
+		sporadic_resume_activity(next);
+swap:
+	/*
+	 * A non-sporadic outgoing thread may be undergoing a priority
+	 * inheritance boost, so apply an infinite time budget as we
+	 * want it to release the claimed resource asap. Otherwise,
+	 * clear the drop timer, then schedule a replenishment
+	 * operation.
+	 */
+	if (curr->pss)
+		sporadic_suspend_activity(curr);
+
+	return next;
+}
+
+#ifdef CONFIG_XENO_OPT_VFILE
+
+struct xnvfile_directory sched_sporadic_vfroot;
+
+struct vfile_sched_sporadic_priv {
+	int nrthreads;
+	struct xnthread *curr;
+};
+
+struct vfile_sched_sporadic_data {
+	int cpu;
+	pid_t pid;
+	char name[XNOBJECT_NAME_LEN];
+	int current_prio;
+	int low_prio;
+	int normal_prio;
+	xnticks_t period;
+	xnticks_t timeout;
+	xnticks_t budget;
+};
+
+static struct xnvfile_snapshot_ops vfile_sched_sporadic_ops;
+
+static struct xnvfile_snapshot vfile_sched_sporadic = {
+	.privsz = sizeof(struct vfile_sched_sporadic_priv),
+	.datasz = sizeof(struct vfile_sched_sporadic_data),
+	.tag = &nkthreadlist_tag,
+	.ops = &vfile_sched_sporadic_ops,
+};
+
+static int vfile_sched_sporadic_rewind(struct xnvfile_snapshot_iterator *it)
+{
+	struct vfile_sched_sporadic_priv *priv = xnvfile_iterator_priv(it);
+	int nrthreads = xnsched_class_sporadic.nthreads;
+
+	if (nrthreads == 0)
+		return -ESRCH;
+
+	priv->curr = list_first_entry(&nkthreadq, struct xnthread, glink);
+
+	return nrthreads;
+}
+
+static int vfile_sched_sporadic_next(struct xnvfile_snapshot_iterator *it,
+				     void *data)
+{
+	struct vfile_sched_sporadic_priv *priv = xnvfile_iterator_priv(it);
+	struct vfile_sched_sporadic_data *p = data;
+	struct xnthread *thread;
+
+	if (priv->curr == NULL)
+		return 0;	/* All done. */
+
+	thread = priv->curr;
+	if (list_is_last(&thread->glink, &nkthreadq))
+		priv->curr = NULL;
+	else
+		priv->curr = list_next_entry(thread, glink);
+
+	if (thread->base_class != &xnsched_class_sporadic)
+		return VFILE_SEQ_SKIP;
+
+	p->cpu = xnsched_cpu(thread->sched);
+	p->pid = xnthread_host_pid(thread);
+	memcpy(p->name, thread->name, sizeof(p->name));
+	p->current_prio = thread->cprio;
+	p->low_prio = thread->pss->param.low_prio;
+	p->normal_prio = thread->pss->param.normal_prio;
+	p->period = xnthread_get_period(thread);
+	p->budget = thread->pss->param.init_budget;
+
+	return 1;
+}
+
+static int vfile_sched_sporadic_show(struct xnvfile_snapshot_iterator *it,
+				     void *data)
+{
+	char lpbuf[16], npbuf[16], ptbuf[16], btbuf[16];
+	struct vfile_sched_sporadic_data *p = data;
+
+	if (p == NULL)
+		xnvfile_printf(it,
+			       "%-3s  %-6s %-4s %-4s  %-10s %-10s %s\n",
+			       "CPU", "PID", "LPRI", "NPRI", "BUDGET",
+			       "PERIOD", "NAME");
+	else {
+		ksformat(lpbuf, sizeof(lpbuf), "%3d%c",
+			 p->low_prio, p->current_prio == p->low_prio ? '*' : ' ');
+
+		ksformat(npbuf, sizeof(npbuf), "%3d%c",
+			 p->normal_prio, p->current_prio == p->normal_prio ? '*' : ' ');
+
+		xntimer_format_time(p->period, ptbuf, sizeof(ptbuf));
+		xntimer_format_time(p->budget, btbuf, sizeof(btbuf));
+
+		xnvfile_printf(it,
+			       "%3u  %-6d %-4s %-4s  %-10s %-10s %s\n",
+			       p->cpu,
+			       p->pid,
+			       lpbuf,
+			       npbuf,
+			       btbuf,
+			       ptbuf,
+			       p->name);
+	}
+
+	return 0;
+}
+
+static struct xnvfile_snapshot_ops vfile_sched_sporadic_ops = {
+	.rewind = vfile_sched_sporadic_rewind,
+	.next = vfile_sched_sporadic_next,
+	.show = vfile_sched_sporadic_show,
+};
+
+static int xnsched_sporadic_init_vfile(struct xnsched_class *schedclass,
+				       struct xnvfile_directory *vfroot)
+{
+	int ret;
+
+	ret = xnvfile_init_dir(schedclass->name,
+			       &sched_sporadic_vfroot, vfroot);
+	if (ret)
+		return ret;
+
+	return xnvfile_init_snapshot("threads", &vfile_sched_sporadic,
+				     &sched_sporadic_vfroot);
+}
+
+static void xnsched_sporadic_cleanup_vfile(struct xnsched_class *schedclass)
+{
+	xnvfile_destroy_snapshot(&vfile_sched_sporadic);
+	xnvfile_destroy_dir(&sched_sporadic_vfroot);
+}
+
+#endif /* CONFIG_XENO_OPT_VFILE */
+
+struct xnsched_class xnsched_class_sporadic = {
+	.sched_init		=	xnsched_sporadic_init,
+	.sched_enqueue		=	xnsched_sporadic_enqueue,
+	.sched_dequeue		=	xnsched_sporadic_dequeue,
+	.sched_requeue		=	xnsched_sporadic_requeue,
+	.sched_pick		=	xnsched_sporadic_pick,
+	.sched_tick		=	NULL,
+	.sched_rotate		=	NULL,
+	.sched_migrate		=	NULL,
+	.sched_chkparam		=	xnsched_sporadic_chkparam,
+	.sched_setparam		=	xnsched_sporadic_setparam,
+	.sched_getparam		=	xnsched_sporadic_getparam,
+	.sched_trackprio	=	xnsched_sporadic_trackprio,
+	.sched_protectprio	=	xnsched_sporadic_protectprio,
+	.sched_declare		=	xnsched_sporadic_declare,
+	.sched_forget		=	xnsched_sporadic_forget,
+	.sched_kick		=	NULL,
+#ifdef CONFIG_XENO_OPT_VFILE
+	.sched_init_vfile	=	xnsched_sporadic_init_vfile,
+	.sched_cleanup_vfile	=	xnsched_sporadic_cleanup_vfile,
+#endif
+	.weight			=	XNSCHED_CLASS_WEIGHT(3),
+	.policy			=	SCHED_SPORADIC,
+	.name			=	"pss"
+};
+EXPORT_SYMBOL_GPL(xnsched_class_sporadic);
+++ linux-patched/kernel/xenomai/vfile.c	2022-03-21 12:58:28.875893974 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/kernel/xenomai/init.c	1970-01-01 01:00:00.000000000 +0100
+/*
+ * Copyright (C) 2010 Philippe Gerum <rpm@xenomai.org>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+#include <linux/stdarg.h>
+#include <linux/ctype.h>
+#include <linux/uaccess.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <cobalt/kernel/lock.h>
+#include <cobalt/kernel/assert.h>
+#include <cobalt/kernel/vfile.h>
+#include <asm/xenomai/wrappers.h>
+
+/**
+ * @ingroup cobalt_core
+ * @defgroup cobalt_core_vfile Virtual file services
+ *
+ * Virtual files provide a means to export Xenomai object states to
+ * user-space, based on common kernel interfaces.  This encapsulation
+ * is aimed at:
+ *
+ * - supporting consistent collection of very large record-based
+ * output, without incurring latency peaks for ongoing real-time
+ * activities.
+ *
+ * - in the future, hiding discrepancies between Linux kernel
+ * releases, regarding the proper way to export kernel object states
+ * to userland, either via the /proc interface or by any other means.
+ *
+ * This virtual file implementation offers record-based read support
+ * based on seq_files, single-buffer write support, directory and link
+ * handling, all visible from the /proc namespace.
+ *
+ * The vfile support exposes four filesystem object types:
+ *
+ * - snapshot-driven file (struct xnvfile_snapshot). This is commonly
+ * used to export real-time object states via the /proc filesystem. To
+ * minimize the latency involved in protecting the vfile routines from
+ * changes applied by real-time code on such objects, a snapshot of
+ * the data to output is first taken under proper locking, before the
+ * collected data is formatted and sent out in a lockless manner.
+ *
+ * Because a large number of records may have to be output, the data
+ * collection phase is not strictly atomic as a whole, but only
+ * protected at record level. The vfile implementation can be notified
+ * of updates to the underlying data set, and restart the collection
+ * from scratch until the snapshot is fully consistent.
+ *
+ * - regular sequential file (struct xnvfile_regular). This is
+ * basically an encapsulated sequential file object as available from
+ * the host kernel (i.e. seq_file), with a few additional features
+ * making it handier in a Xenomai environment, such as implicit
+ * locking support and a shortened declaration form for the simplest,
+ * single-record output.
+ *
+ * - virtual link (struct xnvfile_link). This is a symbolic link
+ * feature integrated with the vfile semantics. The link target is
+ * computed dynamically at creation time from a user-given helper
+ * routine.
+ *
+ * - virtual directory (struct xnvfile_directory). A directory object,
+ * which can be used to create a hierarchy for ordering a set of vfile
+ * objects.
+ *
+ *@{*/
+
+/**
+ * @var struct xnvfile_directory cobalt_vfroot
+ * @brief Xenomai vfile root directory
+ *
+ * This vdir maps the /proc/xenomai directory. It can be used to
+ * create a hierarchy of Xenomai-related vfiles under this root.
+ */
+struct xnvfile_directory cobalt_vfroot;
+EXPORT_SYMBOL_GPL(cobalt_vfroot);
+
+static struct xnvfile_directory sysroot;
+
+static void *vfile_snapshot_start(struct seq_file *seq, loff_t *offp)
+{
+	struct xnvfile_snapshot_iterator *it = seq->private;
+	loff_t pos = *offp;
+
+	if (pos > it->nrdata)
+		return NULL;
+
+	if (pos == 0)
+		return SEQ_START_TOKEN;
+
+	return it->databuf + (pos - 1) * it->vfile->datasz;
+}
+
+static void *vfile_snapshot_next(struct seq_file *seq, void *v, loff_t *offp)
+{
+	struct xnvfile_snapshot_iterator *it = seq->private;
+	loff_t pos = *offp;
+
+	++*offp;
+
+	if (pos >= it->nrdata)
+		return NULL;
+
+	return it->databuf + pos * it->vfile->datasz;
+}
+
+static void vfile_snapshot_stop(struct seq_file *seq, void *v)
+{
+}
+
+static int vfile_snapshot_show(struct seq_file *seq, void *v)
+{
+	struct xnvfile_snapshot_iterator *it = seq->private;
+	void *data = v == SEQ_START_TOKEN ? NULL : v;
+	int ret;
+
+	ret = it->vfile->ops->show(it, data);
+
+	return ret == VFILE_SEQ_SKIP ? SEQ_SKIP : ret;
+}
+
+static struct seq_operations vfile_snapshot_ops = {
+	.start = vfile_snapshot_start,
+	.next = vfile_snapshot_next,
+	.stop = vfile_snapshot_stop,
+	.show = vfile_snapshot_show
+};
+
+static void vfile_snapshot_free(struct xnvfile_snapshot_iterator *it, void *buf)
+{
+	kfree(buf);
+}
+
+static int vfile_snapshot_open(struct inode *inode, struct file *file)
+{
+	struct xnvfile_snapshot *vfile = PDE_DATA(inode);
+	struct xnvfile_snapshot_ops *ops = vfile->ops;
+	struct xnvfile_snapshot_iterator *it;
+	int revtag, ret, nrdata;
+	struct seq_file *seq;
+	caddr_t data;
+
+	WARN_ON_ONCE(file->private_data != NULL);
+
+	if ((file->f_mode & FMODE_WRITE) != 0 && ops->store == NULL)
+		return -EACCES;
+
+	/*
+	 * Make sure to create the seq_file backend only when reading
+	 * from the v-file is possible.
+	 */
+	if ((file->f_mode & FMODE_READ) == 0) {
+		file->private_data = NULL;
+		return 0;
+	}
+
+	if ((file->f_flags & O_EXCL) != 0 && xnvfile_nref(vfile) > 0)
+		return -EBUSY;
+
+	it = kzalloc(sizeof(*it) + vfile->privsz, GFP_KERNEL);
+	if (it == NULL)
+		return -ENOMEM;
+
+	it->vfile = vfile;
+	xnvfile_file(vfile) = file;
+
+	ret = vfile->entry.lockops->get(&vfile->entry);
+	if (ret)
+		goto fail;
+redo:
+	/*
+	 * The ->rewind() method is optional; there may be cases where
+	 * we don't have to take an atomic snapshot of the v-file
+	 * contents before proceeding. In case ->rewind() detects a
+	 * stale backend object, it can force us to bail out.
+	 *
+	 * If present, ->rewind() may return a strictly positive
+	 * value, indicating how many records at most may be returned
+	 * by ->next(). We use this hint to allocate the snapshot
+	 * buffer, in case ->begin() is not provided. The size of this
+	 * buffer would then be vfile->datasz * hint value.
+	 *
+	 * If ->begin() is given, we always expect the latter to do the
+	 * allocation for us regardless of the hint value. Otherwise, a
+	 * zero return from ->rewind() tells us that the vfile won't
+	 * output any snapshot data via ->show().
+	 */
+	nrdata = 0;
+	if (ops->rewind) {
+		nrdata = ops->rewind(it);
+		if (nrdata < 0) {
+			ret = nrdata;
+			vfile->entry.lockops->put(&vfile->entry);
+			goto fail;
+		}
+	}
+	revtag = vfile->tag->rev;
+
+	vfile->entry.lockops->put(&vfile->entry);
+
+	/* Release the data buffer, in case we had to restart. */
+	if (it->databuf) {
+		it->endfn(it, it->databuf);
+		it->databuf = NULL;
+	}
+
+	/*
+	 * Having no record to output is fine, in which case ->begin()
+	 * shall return VFILE_SEQ_EMPTY if present. ->begin() may be
+	 * absent, meaning that no allocation is even required to
+	 * collect the records to output. NULL is kept for allocation
+	 * errors in all other cases.
+	 */
+	if (ops->begin) {
+		XENO_BUG_ON(COBALT, ops->end == NULL);
+		data = ops->begin(it);
+		if (data == NULL) {
+			kfree(it);
+			return -ENOMEM;
+		}
+		if (data != VFILE_SEQ_EMPTY) {
+			it->databuf = data;
+			it->endfn = ops->end;
+		}
+	} else if (nrdata > 0 && vfile->datasz > 0) {
+		/* We have a hint for auto-allocation. */
+		data = kmalloc(vfile->datasz * nrdata, GFP_KERNEL);
+		if (data == NULL) {
+			kfree(it);
+			return -ENOMEM;
+		}
+		it->databuf = data;
+		it->endfn = vfile_snapshot_free;
+	}
+
+	it->nrdata = 0;
+	data = it->databuf;
+	if (data == NULL)
+		goto done;
+
+	/*
+	 * Take a snapshot of the vfile contents, redo if the revision
+	 * tag of the scanned data set changed concurrently.
+	 */
+	for (;;) {
+		ret = vfile->entry.lockops->get(&vfile->entry);
+		if (ret)
+			goto fail;
+		if (vfile->tag->rev != revtag)
+			goto redo;
+		ret = ops->next(it, data);
+		vfile->entry.lockops->put(&vfile->entry);
+		if (ret < 0)
+			goto fail;
+		if (ret == 0)
+			break;
+		if (ret != VFILE_SEQ_SKIP) {
+			data += vfile->datasz;
+			it->nrdata++;
+		}
+	}
+
+done:
+	ret = seq_open(file, &vfile_snapshot_ops);
+	if (ret)
+		goto fail;
+
+	seq = file->private_data;
+	it->seq = seq;
+	seq->private = it;
+	xnvfile_nref(vfile)++;
+
+	return 0;
+
+fail:
+	if (it->databuf)
+		it->endfn(it, it->databuf);
+	kfree(it);
+
+	return ret;
+}
+
+static int vfile_snapshot_release(struct inode *inode, struct file *file)
+{
+	struct seq_file *seq = file->private_data;
+	struct xnvfile_snapshot_iterator *it;
+
+	if (seq) {
+		it = seq->private;
+		if (it) {
+			--xnvfile_nref(it->vfile);
+			XENO_BUG_ON(COBALT, it->vfile->entry.refcnt < 0);
+			if (it->databuf)
+				it->endfn(it, it->databuf);
+			kfree(it);
+		}
+
+		return seq_release(inode, file);
+	}
+
+	return 0;
+}
+
+ssize_t vfile_snapshot_write(struct file *file, const char __user *buf,
+			     size_t size, loff_t *ppos)
+{
+	struct xnvfile_snapshot *vfile =
+		PDE_DATA(file->f_path.dentry->d_inode);
+	struct xnvfile_input input;
+	ssize_t ret;
+
+	if (vfile->entry.lockops) {
+		ret = vfile->entry.lockops->get(&vfile->entry);
+		if (ret)
+			return ret;
+	}
+
+	input.u_buf = buf;
+	input.size = size;
+	input.vfile = &vfile->entry;
+
+	ret = vfile->ops->store(&input);
+
+	if (vfile->entry.lockops)
+		vfile->entry.lockops->put(&vfile->entry);
+
+	return ret;
+}
+
+static const DEFINE_PROC_OPS(vfile_snapshot_fops,
+			vfile_snapshot_open,
+			vfile_snapshot_release,
+			seq_read,
+			vfile_snapshot_write);
+
+/**
+ * @fn int xnvfile_init_snapshot(const char *name, struct xnvfile_snapshot *vfile, struct xnvfile_directory *parent)
+ * @brief Initialize a snapshot-driven vfile.
+ *
+ * @param name The name which should appear in the pseudo-filesystem,
+ * identifying the vfile entry.
+ *
+ * @param vfile A pointer to a vfile descriptor to initialize
+ * from. The following fields in this structure should be filled in
+ * prior to calling this routine:
+ *
+ * - .privsz is the size (in bytes) of the private data area to be
+ * reserved in the @ref snapshot_iterator "vfile iterator". A zero
+ * value indicates that no private area should be reserved.
+ *
+ * - .datasz is the size (in bytes) of a single record to be collected
+ * by the @ref snapshot_next "next() handler" from the @ref
+ * snapshot_ops "operation descriptor".
+ *
+ * - .tag is a pointer to a mandatory vfile revision tag structure
+ * (struct xnvfile_rev_tag). This tag will be monitored for changes by
+ * the vfile core while collecting data to output, so that any update
+ * detected will cause the current snapshot data to be dropped, and
+ * the collection to restart from the beginning. To this end, any
+ * change to the data which may be part of the collected records,
+ * should also invoke xnvfile_touch() on the associated tag.
+ *
+ * - entry.lockops is a pointer to a @ref vfile_lockops "lock descriptor",
+ * defining the lock and unlock operations for the vfile. This pointer
+ * may be left NULL, in which case the operations on the nucleus
+ * lock (i.e. nklock) will be used internally around calls to data
+ * collection handlers (see @ref snapshot_ops "operation descriptor").
+ *
+ * - .ops is a pointer to an @ref snapshot_ops "operation descriptor".
+ *
+ * @param parent A pointer to a virtual directory descriptor; the
+ * vfile entry will be created into this directory. If NULL, the /proc
+ * root directory will be used. /proc/xenomai is mapped on the
+ * globally available @a cobalt_vfroot vdir.
+ *
+ * @return 0 is returned on success. Otherwise:
+ *
+ * - -ENOMEM is returned if the virtual file entry cannot be created
+ * in the /proc hierarchy.
+ *
+ * @coretags{secondary-only}
+ */
+int xnvfile_init_snapshot(const char *name,
+			  struct xnvfile_snapshot *vfile,
+			  struct xnvfile_directory *parent)
+{
+	struct proc_dir_entry *ppde, *pde;
+	int mode;
+
+	XENO_BUG_ON(COBALT, vfile->tag == NULL);
+
+	if (vfile->entry.lockops == NULL)
+		/* Defaults to nucleus lock */
+		vfile->entry.lockops = &xnvfile_nucleus_lock.ops;
+
+	if (parent == NULL)
+		parent = &sysroot;
+
+	mode = vfile->ops->store ? 0644 : 0444;
+	ppde = parent->entry.pde;
+	pde = proc_create_data(name, mode, ppde, &vfile_snapshot_fops, vfile);
+	if (pde == NULL)
+		return -ENOMEM;
+
+	vfile->entry.pde = pde;
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(xnvfile_init_snapshot);
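+
+/*
+ * Minimal usage sketch (hypothetical names, mirroring the pattern of
+ * the scheduler class vfiles in this patch): declare the snapshot
+ * descriptor with its revision tag and operation set, then register
+ * it under a parent vdir.
+ *
+ *	static struct xnvfile_rev_tag foo_list_tag;
+ *
+ *	static struct xnvfile_snapshot_ops foo_vfile_ops = {
+ *		.rewind = foo_vfile_rewind,
+ *		.next = foo_vfile_next,
+ *		.show = foo_vfile_show,
+ *	};
+ *
+ *	static struct xnvfile_snapshot foo_vfile = {
+ *		.privsz = sizeof(struct foo_vfile_priv),
+ *		.datasz = sizeof(struct foo_vfile_data),
+ *		.tag = &foo_list_tag,
+ *		.ops = &foo_vfile_ops,
+ *	};
+ *
+ *	ret = xnvfile_init_snapshot("foo", &foo_vfile, &cobalt_vfroot);
+ *
+ * Code updating the data set covered by foo_vfile is then expected to
+ * call xnvfile_touch() as documented above, so that an ongoing
+ * collection restarts from a consistent state.
+ */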
+
+static void *vfile_regular_start(struct seq_file *seq, loff_t *offp)
+{
+	struct xnvfile_regular_iterator *it = seq->private;
+	struct xnvfile_regular *vfile = it->vfile;
+	int ret;
+
+	it->pos = *offp;
+
+	if (vfile->entry.lockops) {
+		ret = vfile->entry.lockops->get(&vfile->entry);
+		if (ret)
+			return ERR_PTR(ret);
+	}
+
+	/*
+	 * If we have no begin() op, we allow a single call to ->show()
+	 * only, by returning the start token once; past the first
+	 * position, we are done.
+	 */
+	if (vfile->ops->begin == NULL)
+		return it->pos > 0 ? NULL : SEQ_START_TOKEN;
+
+	return vfile->ops->begin(it);
+}
+
+static void *vfile_regular_next(struct seq_file *seq, void *v, loff_t *offp)
+{
+	struct xnvfile_regular_iterator *it = seq->private;
+	struct xnvfile_regular *vfile = it->vfile;
+	void *data;
+
+	it->pos = ++(*offp);
+
+	if (vfile->ops->next == NULL)
+		return NULL;
+
+	data = vfile->ops->next(it);
+	if (data == NULL)
+		return NULL;
+
+	return data;
+}
+
+static void vfile_regular_stop(struct seq_file *seq, void *v)
+{
+	struct xnvfile_regular_iterator *it = seq->private;
+	struct xnvfile_regular *vfile = it->vfile;
+
+	if (vfile->entry.lockops)
+		vfile->entry.lockops->put(&vfile->entry);
+
+	if (vfile->ops->end)
+		vfile->ops->end(it);
+}
+
+static int vfile_regular_show(struct seq_file *seq, void *v)
+{
+	struct xnvfile_regular_iterator *it = seq->private;
+	struct xnvfile_regular *vfile = it->vfile;
+	void *data = v == SEQ_START_TOKEN ? NULL : v;
+	int ret;
+
+	ret = vfile->ops->show(it, data);
+
+	return ret == VFILE_SEQ_SKIP ? SEQ_SKIP : ret;
+}
+
+static struct seq_operations vfile_regular_ops = {
+	.start = vfile_regular_start,
+	.next = vfile_regular_next,
+	.stop = vfile_regular_stop,
+	.show = vfile_regular_show
+};
+
+static int vfile_regular_open(struct inode *inode, struct file *file)
+{
+	struct xnvfile_regular *vfile = PDE_DATA(inode);
+	struct xnvfile_regular_ops *ops = vfile->ops;
+	struct xnvfile_regular_iterator *it;
+	struct seq_file *seq;
+	int ret;
+
+	if ((file->f_flags & O_EXCL) != 0 && xnvfile_nref(vfile) > 0)
+		return -EBUSY;
+
+	if ((file->f_mode & FMODE_WRITE) != 0 && ops->store == NULL)
+		return -EACCES;
+
+	if ((file->f_mode & FMODE_READ) == 0) {
+		file->private_data = NULL;
+		return 0;
+	}
+
+	it = kzalloc(sizeof(*it) + vfile->privsz, GFP_KERNEL);
+	if (it == NULL)
+		return -ENOMEM;
+
+	it->vfile = vfile;
+	it->pos = -1;
+	xnvfile_file(vfile) = file;
+
+	if (ops->rewind) {
+		ret = ops->rewind(it);
+		if (ret) {
+		fail:
+			kfree(it);
+			return ret;
+		}
+	}
+
+	ret = seq_open(file, &vfile_regular_ops);
+	if (ret)
+		goto fail;
+
+	seq = file->private_data;
+	it->seq = seq;
+	seq->private = it;
+	xnvfile_nref(vfile)++;
+
+	return 0;
+}
+
+static int vfile_regular_release(struct inode *inode, struct file *file)
+{
+	struct seq_file *seq = file->private_data;
+	struct xnvfile_regular_iterator *it;
+
+	if (seq) {
+		it = seq->private;
+		if (it) {
+			--xnvfile_nref(it->vfile);
+			XENO_BUG_ON(COBALT, xnvfile_nref(it->vfile) < 0);
+			kfree(it);
+		}
+
+		return seq_release(inode, file);
+	}
+
+	return 0;
+}
+
+ssize_t vfile_regular_write(struct file *file, const char __user *buf,
+			    size_t size, loff_t *ppos)
+{
+	struct xnvfile_regular *vfile =
+		PDE_DATA(file->f_path.dentry->d_inode);
+	struct xnvfile_input input;
+	ssize_t ret;
+
+	if (vfile->entry.lockops) {
+		ret = vfile->entry.lockops->get(&vfile->entry);
+		if (ret)
+			return ret;
+	}
+
+	input.u_buf = buf;
+	input.size = size;
+	input.vfile = &vfile->entry;
+
+	ret = vfile->ops->store(&input);
+
+	if (vfile->entry.lockops)
+		vfile->entry.lockops->put(&vfile->entry);
+
+	return ret;
+}
+
+static const DEFINE_PROC_OPS(vfile_regular_fops,
+			vfile_regular_open,
+			vfile_regular_release,
+			seq_read,
+			vfile_regular_write);
+
+/**
+ * @fn int xnvfile_init_regular(const char *name, struct xnvfile_regular *vfile, struct xnvfile_directory *parent)
+ * @brief Initialize a regular vfile.
+ *
+ * @param name The name which should appear in the pseudo-filesystem,
+ * identifying the vfile entry.
+ *
+ * @param vfile A pointer to a vfile descriptor to initialize
+ * from. The following fields in this structure should be filled in
+ * prior to calling this routine:
+ *
+ * - .privsz is the size (in bytes) of the private data area to be
+ * reserved in the @ref regular_iterator "vfile iterator". A zero
+ * value indicates that no private area should be reserved.
+ *
+ * - entry.lockops is a pointer to a @ref vfile_lockops "locking
+ * descriptor", defining the lock and unlock operations for the
+ * vfile. This pointer may be left NULL, in which case no
+ * locking will be applied.
+ *
+ * - .ops is a pointer to an @ref regular_ops "operation descriptor".
+ *
+ * @param parent A pointer to a virtual directory descriptor; the
+ * vfile entry will be created into this directory. If NULL, the /proc
+ * root directory will be used. /proc/xenomai is mapped on the
+ * globally available @a cobalt_vfroot vdir.
+ *
+ * @return 0 is returned on success. Otherwise:
+ *
+ * - -ENOMEM is returned if the virtual file entry cannot be created
+ * in the /proc hierarchy.
+ *
+ * @coretags{secondary-only}
+ */
+int xnvfile_init_regular(const char *name,
+			 struct xnvfile_regular *vfile,
+			 struct xnvfile_directory *parent)
+{
+	struct proc_dir_entry *ppde, *pde;
+	int mode;
+
+	if (parent == NULL)
+		parent = &sysroot;
+
+	mode = vfile->ops->store ? 0644 : 0444;
+	ppde = parent->entry.pde;
+	pde = proc_create_data(name, mode, ppde, &vfile_regular_fops, vfile);
+	if (pde == NULL)
+		return -ENOMEM;
+
+	vfile->entry.pde = pde;
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(xnvfile_init_regular);
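+
+/*
+ * Minimal usage sketch (hypothetical names, following the pattern of
+ * the single-record vfiles in this patch, e.g. the latency node): a
+ * regular vfile only needs a show() handler; adding a store() handler
+ * makes the node writable.
+ *
+ *	static int foo_show(struct xnvfile_regular_iterator *it, void *data)
+ *	{
+ *		xnvfile_printf(it, "%d\n", foo_value);
+ *		return 0;
+ *	}
+ *
+ *	static struct xnvfile_regular_ops foo_vfile_ops = {
+ *		.show = foo_show,
+ *	};
+ *
+ *	static struct xnvfile_regular foo_vfile = {
+ *		.ops = &foo_vfile_ops,
+ *	};
+ *
+ *	ret = xnvfile_init_regular("foo", &foo_vfile, &cobalt_vfroot);
+ */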
+
+/**
+ * @fn int xnvfile_init_dir(const char *name, struct xnvfile_directory *vdir, struct xnvfile_directory *parent)
+ * @brief Initialize a virtual directory entry.
+ *
+ * @param name The name which should appear in the pseudo-filesystem,
+ * identifying the vdir entry.
+ *
+ * @param vdir A pointer to the virtual directory descriptor to
+ * initialize.
+ *
+ * @param parent A pointer to a virtual directory descriptor standing
+ * for the parent directory of the new vdir.  If NULL, the /proc root
+ * directory will be used. /proc/xenomai is mapped on the globally
+ * available @a cobalt_vfroot vdir.
+ *
+ * @return 0 is returned on success. Otherwise:
+ *
+ * - -ENOMEM is returned if the virtual directory entry cannot be
+ * created in the /proc hierarchy.
+ *
+ * @coretags{secondary-only}
+ */
+int xnvfile_init_dir(const char *name,
+		     struct xnvfile_directory *vdir,
+		     struct xnvfile_directory *parent)
+{
+	struct proc_dir_entry *ppde, *pde;
+
+	if (parent == NULL)
+		parent = &sysroot;
+
+	ppde = parent->entry.pde;
+	pde = proc_mkdir(name, ppde);
+	if (pde == NULL)
+		return -ENOMEM;
+
+	vdir->entry.pde = pde;
+	vdir->entry.lockops = NULL;
+	vdir->entry.private = NULL;
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(xnvfile_init_dir);
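+
+/*
+ * Usage sketch (hypothetical names): the scheduling classes in this
+ * patch create their own subdirectory first, then attach vfiles to
+ * it, e.g.:
+ *
+ *	ret = xnvfile_init_dir("foo", &foo_vfroot, &cobalt_vfroot);
+ *	if (ret == 0)
+ *		ret = xnvfile_init_snapshot("threads", &foo_vfile,
+ *					    &foo_vfroot);
+ */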
+
+/**
+ * @fn int xnvfile_init_link(const char *from, const char *to, struct xnvfile_link *vlink, struct xnvfile_directory *parent)
+ * @brief Initialize a virtual link entry.
+ *
+ * @param from The name which should appear in the pseudo-filesystem,
+ * identifying the vlink entry.
+ *
+ * @param to The target file name which should be referred to
+ * symbolically by @a name.
+ *
+ * @param vlink A pointer to the virtual link descriptor to
+ * initialize.
+ *
+ * @param parent A pointer to a virtual directory descriptor standing
+ * for the parent directory of the new vlink. If NULL, the /proc root
+ * directory will be used. /proc/xenomai is mapped on the globally
+ * available @a cobalt_vfroot vdir.
+ *
+ * @return 0 is returned on success. Otherwise:
+ *
+ * - -ENOMEM is returned if the virtual link entry cannot be created
+ * in the /proc hierarchy.
+ *
+ * @coretags{secondary-only}
+ */
+int xnvfile_init_link(const char *from,
+		      const char *to,
+		      struct xnvfile_link *vlink,
+		      struct xnvfile_directory *parent)
+{
+	struct proc_dir_entry *ppde, *pde;
+
+	if (parent == NULL)
+		parent = &sysroot;
+
+	ppde = parent->entry.pde;
+	pde = proc_symlink(from, ppde, to);
+	if (pde == NULL)
+		return -ENOMEM;
+
+	vlink->entry.pde = pde;
+	vlink->entry.lockops = NULL;
+	vlink->entry.private = NULL;
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(xnvfile_init_link);
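+
+/*
+ * Usage sketch (hypothetical names and target): create a symlink
+ * under /proc/xenomai pointing at another proc entry.
+ *
+ *	ret = xnvfile_init_link("foo", "bar", &foo_vlink, &cobalt_vfroot);
+ */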
+
+/**
+ * @fn void xnvfile_destroy(struct xnvfile *vfile)
+ * @brief Removes a virtual file entry.
+ *
+ * @param vfile A pointer to the virtual file descriptor to
+ * remove.
+ *
+ * @coretags{secondary-only}
+ */
+void xnvfile_destroy(struct xnvfile *vfile)
+{
+	proc_remove(vfile->pde);
+}
+EXPORT_SYMBOL_GPL(xnvfile_destroy);
+
+/**
+ * @fn ssize_t xnvfile_get_blob(struct xnvfile_input *input, void *data, size_t size)
+ * @brief Read in a data bulk written to the vfile.
+ *
+ * When writing to a vfile, the associated store() handler from the
+ * @ref snapshot_store "snapshot-driven vfile" or @ref regular_store
+ * "regular vfile" is called, with a single argument describing the
+ * input data. xnvfile_get_blob() retrieves this data as an untyped
+ * binary blob, and copies it back to the caller's buffer.
+ *
+ * @param input A pointer to the input descriptor passed to the
+ * store() handler.
+ *
+ * @param data The address of the destination buffer to copy the input
+ * data to.
+ *
+ * @param size The maximum number of bytes to copy to the destination
+ * buffer. If the actual data size is larger than @a size, the input
+ * is truncated to @a size.
+ *
+ * @return The number of bytes read and copied to the destination
+ * buffer upon success. Otherwise, a negative error code is returned:
+ *
+ * - -EFAULT indicates an invalid source buffer address.
+ *
+ * @coretags{secondary-only}
+ */
+ssize_t xnvfile_get_blob(struct xnvfile_input *input,
+			 void *data, size_t size)
+{
+	ssize_t nbytes = input->size;
+
+	if (nbytes > size)
+		nbytes = size;
+
+	if (nbytes > 0 && copy_from_user(data, input->u_buf, nbytes))
+		return -EFAULT;
+
+	return nbytes;
+}
+EXPORT_SYMBOL_GPL(xnvfile_get_blob);
+
+/**
+ * @fn ssize_t xnvfile_get_string(struct xnvfile_input *input, char *s, size_t maxlen)
+ * @brief Read in a C-string written to the vfile.
+ *
+ * When writing to a vfile, the associated store() handler from the
+ * @ref snapshot_store "snapshot-driven vfile" or @ref regular_store
+ * "regular vfile" is called, with a single argument describing the
+ * input data. xnvfile_get_string() retrieves this data as a
+ * null-terminated character string, and copies it back to the
+ * caller's buffer.
+ *
+ * @param input A pointer to the input descriptor passed to the
+ * store() handler.
+ *
+ * @param s The address of the destination string buffer to copy the
+ * input data to.
+ *
+ * @param maxlen The maximum number of bytes to copy to the
+ * destination buffer, including the ending null character. If the
+ * actual string length is larger than @a maxlen - 1, the input is
+ * truncated to fit.
+ *
+ * @return The number of characters read upon success. Otherwise, a
+ * negative error code is returned:
+ *
+ * - -EFAULT indicates an invalid source buffer address.
+ *
+ * @coretags{secondary-only}
+ */
+ssize_t xnvfile_get_string(struct xnvfile_input *input,
+			   char *s, size_t maxlen)
+{
+	ssize_t nbytes, eol;
+
+	if (maxlen < 1)
+		return -EINVAL;
+
+	nbytes = xnvfile_get_blob(input, s, maxlen - 1);
+	if (nbytes < 0)
+		return nbytes;
+
+	eol = nbytes;
+	if (eol > 0 && s[eol - 1] == '\n')
+		eol--;
+
+	s[eol] = '\0';
+
+	return nbytes;
+}
+EXPORT_SYMBOL_GPL(xnvfile_get_string);
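+
+/*
+ * Usage sketch (hypothetical names): a store() handler decoding a
+ * short keyword written by the user, e.g. "echo reset > <vfile>".
+ *
+ *	static ssize_t foo_store(struct xnvfile_input *input)
+ *	{
+ *		char buf[16];
+ *		ssize_t nbytes;
+ *
+ *		nbytes = xnvfile_get_string(input, buf, sizeof(buf));
+ *		if (nbytes < 0)
+ *			return nbytes;
+ *
+ *		if (strcmp(buf, "reset"))
+ *			return -EINVAL;
+ *
+ *		foo_reset();
+ *
+ *		return nbytes;
+ *	}
+ */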
+
+/**
+ * @fn ssize_t xnvfile_get_integer(struct xnvfile_input *input, long *valp)
+ * @brief Evaluate the string written to the vfile as a long integer.
+ *
+ * When writing to a vfile, the associated store() handler from the
+ * @ref snapshot_store "snapshot-driven vfile" or @ref regular_store
+ * "regular vfile" is called, with a single argument describing the
+ * input data. xnvfile_get_integer() retrieves and interprets this
+ * data as a long integer, and copies the resulting value back to @a
+ * valp.
+ *
+ * The long integer can be expressed in decimal, octal or hexadecimal
+ * bases depending on the prefix found.
+ *
+ * @param input A pointer to the input descriptor passed to the
+ * store() handler.
+ *
+ * @param valp The address of a long integer variable to receive the
+ * value.
+ *
+ * @return The number of characters read while evaluating the input as
+ * a long integer upon success. Otherwise, a negative error code is
+ * returned:
+ *
+ * - -EINVAL indicates a parse error on the input stream; the written
+ * text cannot be evaluated as a long integer.
+ *
+ * - -EFAULT indicates an invalid source buffer address.
+ *
+ * @coretags{secondary-only}
+ */
+ssize_t xnvfile_get_integer(struct xnvfile_input *input, long *valp)
+{
+	char *end, buf[32];
+	ssize_t nbytes;
+	long val;
+
+	nbytes = xnvfile_get_blob(input, buf, sizeof(buf) - 1);
+	if (nbytes < 0)
+		return nbytes;
+
+	if (nbytes == 0)
+		return -EINVAL;
+
+	buf[nbytes] = '\0';
+	val = simple_strtol(buf, &end, 0);
+
+	if (*end != '\0' && !isspace(*end))
+		return -EINVAL;
+
+	*valp = val;
+
+	return nbytes;
+}
+EXPORT_SYMBOL_GPL(xnvfile_get_integer);
+
+int __vfile_hostlock_get(struct xnvfile *vfile)
+{
+	struct xnvfile_hostlock_class *lc;
+
+	lc = container_of(vfile->lockops, struct xnvfile_hostlock_class, ops);
+	mutex_lock(&lc->mutex);
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(__vfile_hostlock_get);
+
+void __vfile_hostlock_put(struct xnvfile *vfile)
+{
+	struct xnvfile_hostlock_class *lc;
+
+	lc = container_of(vfile->lockops, struct xnvfile_hostlock_class, ops);
+	mutex_unlock(&lc->mutex);
+}
+EXPORT_SYMBOL_GPL(__vfile_hostlock_put);
+
+static int __vfile_nklock_get(struct xnvfile *vfile)
+{
+	struct xnvfile_nklock_class *lc;
+
+	lc = container_of(vfile->lockops, struct xnvfile_nklock_class, ops);
+	xnlock_get_irqsave(&nklock, lc->s);
+
+	return 0;
+}
+
+static void __vfile_nklock_put(struct xnvfile *vfile)
+{
+	struct xnvfile_nklock_class *lc;
+
+	lc = container_of(vfile->lockops, struct xnvfile_nklock_class, ops);
+	xnlock_put_irqrestore(&nklock, lc->s);
+}
+
+struct xnvfile_nklock_class xnvfile_nucleus_lock = {
+	.ops = {
+		.get = __vfile_nklock_get,
+		.put = __vfile_nklock_put,
+	},
+};
+
+int __init xnvfile_init_root(void)
+{
+	struct xnvfile_directory *vdir = &cobalt_vfroot;
+	struct proc_dir_entry *pde;
+
+	pde = proc_mkdir("xenomai", NULL);
+	if (pde == NULL)
+		return -ENOMEM;
+
+	vdir->entry.pde = pde;
+	vdir->entry.lockops = NULL;
+	vdir->entry.private = NULL;
+
+	return 0;
+}
+
+void xnvfile_destroy_root(void)
+{
+	cobalt_vfroot.entry.pde = NULL;
+	remove_proc_entry("xenomai", NULL);
+}
+
+/** @} */
+++ linux-patched/kernel/xenomai/init.c	2022-03-21 12:58:28.872894003 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/kernel/xenomai/sched.c	1970-01-01 01:00:00.000000000 +0100
+/*
+ * Copyright (C) 2001-2013 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * Xenomai is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published
+ * by the Free Software Foundation; either version 2 of the License,
+ * or (at your option) any later version.
+ *
+ * Xenomai is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Xenomai; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA
+ * 02111-1307, USA.
+ */
+#include <linux/init.h>
+#include <linux/module.h>
+#include <xenomai/version.h>
+#include <pipeline/machine.h>
+#include <pipeline/tick.h>
+#include <cobalt/kernel/sched.h>
+#include <cobalt/kernel/timer.h>
+#include <cobalt/kernel/heap.h>
+#include <cobalt/kernel/intr.h>
+#include <cobalt/kernel/ppd.h>
+#include <cobalt/kernel/pipe.h>
+#include <cobalt/kernel/select.h>
+#include <cobalt/kernel/vdso.h>
+#include <rtdm/fd.h>
+#include "rtdm/internal.h"
+#include "posix/internal.h"
+#include "procfs.h"
+
+/**
+ * @defgroup cobalt Cobalt
+ *
+ * Cobalt supplements the native Linux kernel in dual kernel
+ * configurations. It deals with all time-critical activities, such as
+ * handling interrupts, and scheduling real-time threads. The Cobalt
+ * kernel takes priority over all native kernel activities.
+ *
+ * Cobalt provides an implementation of the POSIX and RTDM interfaces
+ * based on a set of generic RTOS building blocks.
+ */
+
+#ifdef CONFIG_SMP
+static unsigned long supported_cpus_arg = -1;
+module_param_named(supported_cpus, supported_cpus_arg, ulong, 0444);
+#endif /* CONFIG_SMP */
+
+static unsigned long sysheap_size_arg;
+module_param_named(sysheap_size, sysheap_size_arg, ulong, 0444);
+
+static char init_state_arg[16] = "enabled";
+module_param_string(state, init_state_arg, sizeof(init_state_arg), 0444);
+
+static BLOCKING_NOTIFIER_HEAD(state_notifier_list);
+
+struct cobalt_pipeline cobalt_pipeline;
+EXPORT_SYMBOL_GPL(cobalt_pipeline);
+
+DEFINE_PER_CPU(struct cobalt_machine_cpudata, cobalt_machine_cpudata);
+EXPORT_PER_CPU_SYMBOL_GPL(cobalt_machine_cpudata);
+
+atomic_t cobalt_runstate = ATOMIC_INIT(COBALT_STATE_WARMUP);
+EXPORT_SYMBOL_GPL(cobalt_runstate);
+
+struct cobalt_ppd cobalt_kernel_ppd = {
+	.exe_path = "vmlinux",
+};
+EXPORT_SYMBOL_GPL(cobalt_kernel_ppd);
+
+#ifdef CONFIG_XENO_OPT_DEBUG
+#define boot_debug_notice "[DEBUG]"
+#else
+#define boot_debug_notice ""
+#endif
+
+#ifdef CONFIG_ENABLE_DEFAULT_TRACERS
+#define boot_evt_trace_notice "[ETRACE]"
+#else
+#define boot_evt_trace_notice ""
+#endif
+
+#define boot_state_notice						\
+	({								\
+		realtime_core_state() == COBALT_STATE_STOPPED ?		\
+			"[STOPPED]" : "";				\
+	})
+
+void cobalt_add_state_chain(struct notifier_block *nb)
+{
+	blocking_notifier_chain_register(&state_notifier_list, nb);
+}
+EXPORT_SYMBOL_GPL(cobalt_add_state_chain);
+
+void cobalt_remove_state_chain(struct notifier_block *nb)
+{
+	blocking_notifier_chain_unregister(&state_notifier_list, nb);
+}
+EXPORT_SYMBOL_GPL(cobalt_remove_state_chain);
+
+void cobalt_call_state_chain(enum cobalt_run_states newstate)
+{
+	blocking_notifier_call_chain(&state_notifier_list, newstate, NULL);
+}
+EXPORT_SYMBOL_GPL(cobalt_call_state_chain);
+
+static void sys_shutdown(void)
+{
+	void *membase;
+
+	pipeline_uninstall_tick_proxy();
+	xnsched_destroy_all();
+	xnregistry_cleanup();
+	membase = xnheap_get_membase(&cobalt_heap);
+	xnheap_destroy(&cobalt_heap);
+	xnheap_vfree(membase);
+}
+
+static struct {
+	const char *label;
+	enum cobalt_run_states state;
+} init_states[] __initdata = {
+	{ "disabled", COBALT_STATE_DISABLED },
+	{ "stopped", COBALT_STATE_STOPPED },
+	{ "enabled", COBALT_STATE_WARMUP },
+};
+
+static void __init setup_init_state(void)
+{
+	static char warn_bad_state[] __initdata =
+		XENO_WARNING "invalid init state '%s'\n";
+	int n;
+
+	for (n = 0; n < ARRAY_SIZE(init_states); n++)
+		if (strcmp(init_states[n].label, init_state_arg) == 0) {
+			set_realtime_core_state(init_states[n].state);
+			return;
+		}
+
+	printk(warn_bad_state, init_state_arg);
+}
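+
+/*
+ * For instance, passing "xenomai.state=stopped" on the kernel command
+ * line (the "state" parameter declared above, assuming the usual
+ * built-in module prefix) selects COBALT_STATE_STOPPED: sys_init()
+ * below then performs all initializations but leaves the core timer
+ * disabled.
+ */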
+
+static __init int sys_init(void)
+{
+	void *heapaddr;
+	int ret;
+
+	if (sysheap_size_arg == 0)
+		sysheap_size_arg = CONFIG_XENO_OPT_SYS_HEAPSZ;
+
+	heapaddr = xnheap_vmalloc(sysheap_size_arg * 1024);
+	if (heapaddr == NULL ||
+	    xnheap_init(&cobalt_heap, heapaddr, sysheap_size_arg * 1024)) {
+		return -ENOMEM;
+	}
+	xnheap_set_name(&cobalt_heap, "system heap");
+
+	xnsched_init_all();
+
+	xnregistry_init();
+
+	/*
+	 * If starting in stopped mode, do all initializations, but do
+	 * not enable the core timer.
+	 */
+	if (realtime_core_state() == COBALT_STATE_WARMUP) {
+		ret = pipeline_install_tick_proxy();
+		if (ret) {
+			sys_shutdown();
+			return ret;
+		}
+		set_realtime_core_state(COBALT_STATE_RUNNING);
+	}
+
+	return 0;
+}
+
+static int __init xenomai_init(void)
+{
+	int ret, __maybe_unused cpu;
+
+	setup_init_state();
+
+	if (!realtime_core_enabled()) {
+		printk(XENO_WARNING "disabled on kernel command line\n");
+		return 0;
+	}
+
+#ifdef CONFIG_SMP
+	cpumask_clear(&xnsched_realtime_cpus);
+	for_each_online_cpu(cpu) {
+		if (supported_cpus_arg & (1UL << cpu))
+			cpumask_set_cpu(cpu, &xnsched_realtime_cpus);
+	}
+	if (cpumask_empty(&xnsched_realtime_cpus)) {
+		printk(XENO_WARNING "disabled via empty real-time CPU mask\n");
+		set_realtime_core_state(COBALT_STATE_DISABLED);
+		return 0;
+	}
+	if (!cpumask_test_cpu(0, &xnsched_realtime_cpus)) {
+		printk(XENO_ERR "CPU 0 is missing in real-time CPU mask\n");
+		set_realtime_core_state(COBALT_STATE_DISABLED);
+		return -EINVAL;
+	}
+	cobalt_cpu_affinity = xnsched_realtime_cpus;
+#endif /* CONFIG_SMP */
+
+	xnsched_register_classes();
+
+	ret = xnprocfs_init_tree();
+	if (ret)
+		goto fail;
+
+	ret = pipeline_init();
+	if (ret)
+		goto cleanup_proc;
+
+	xnintr_mount();
+
+	ret = xnpipe_mount();
+	if (ret)
+		goto cleanup_mach;
+
+	ret = xnselect_mount();
+	if (ret)
+		goto cleanup_pipe;
+
+	ret = sys_init();
+	if (ret)
+		goto cleanup_select;
+
+	ret = pipeline_late_init();
+	if (ret)
+		goto cleanup_sys;
+
+	ret = rtdm_init();
+	if (ret)
+		goto cleanup_sys;
+
+	ret = cobalt_init();
+	if (ret)
+		goto cleanup_rtdm;
+
+	rtdm_fd_init();
+
+	printk(XENO_INFO "Cobalt v%s %s%s%s%s\n",
+	       XENO_VERSION_STRING,
+	       boot_debug_notice,
+	       boot_lat_trace_notice,
+	       boot_evt_trace_notice,
+	       boot_state_notice);
+
+	return 0;
+
+cleanup_rtdm:
+	rtdm_cleanup();
+cleanup_sys:
+	sys_shutdown();
+cleanup_select:
+	xnselect_umount();
+cleanup_pipe:
+	xnpipe_umount();
+cleanup_mach:
+	pipeline_cleanup();
+cleanup_proc:
+	xnprocfs_cleanup_tree();
+fail:
+	set_realtime_core_state(COBALT_STATE_DISABLED);
+	printk(XENO_ERR "init failed, code %d\n", ret);
+
+	return ret;
+}
+device_initcall(xenomai_init);
+
+/**
+ * @ingroup cobalt
+ * @defgroup cobalt_core Cobalt kernel
+ *
+ * The Cobalt core is a co-kernel which supplements the Linux kernel
+ * for delivering real-time services with very low latency. It
+ * implements a set of generic RTOS building blocks, which the
+ * Cobalt/POSIX and Cobalt/RTDM APIs are based on.  Cobalt takes
+ * priority over the Linux kernel activities.
+ *
+ * @{
+ *
+ * @page cobalt-core-tags Dual kernel service tags
+ *
+ * The Cobalt kernel services may be restricted to particular calling
+ * contexts, or entail specific side-effects. To describe this
+ * information, each service documented by this section bears a set of
+ * tags when applicable.
+ *
+ * The table below matches the tags used throughout the documentation
+ * with the description of their meaning for the caller.
+ *
+ * @par
+ * <b>Context tags</b>
+ * <TABLE>
+ * <TR><TH>Tag</TH> <TH>Context on entry</TH></TR>
+ * <TR><TD>primary-only</TD>	<TD>Must be called from a Cobalt task in primary mode</TD></TR>
+ * <TR><TD>primary-timed</TD>	<TD>Requires a Cobalt task in primary mode if timed</TD></TR>
+ * <TR><TD>coreirq-only</TD>	<TD>Must be called from a Cobalt IRQ handler</TD></TR>
+ * <TR><TD>secondary-only</TD>	<TD>Must be called from a Cobalt task in secondary mode or a regular Linux task</TD></TR>
+ * <TR><TD>rtdm-task</TD>	<TD>Must be called from an RTDM driver task</TD></TR>
+ * <TR><TD>mode-unrestricted</TD>	<TD>May be called from a Cobalt task in either primary or secondary mode</TD></TR>
+ * <TR><TD>task-unrestricted</TD>	<TD>May be called from a Cobalt or regular Linux task indifferently</TD></TR>
+ * <TR><TD>unrestricted</TD>	<TD>May be called from any context previously described</TD></TR>
+ * <TR><TD>atomic-entry</TD>	<TD>Caller must currently hold the big Cobalt kernel lock (nklock)</TD></TR>
+ * </TABLE>
+ *
+ * @par
+ * <b>Possible side-effects</b>
+ * <TABLE>
+ * <TR><TH>Tag</TH> <TH>Description</TH></TR>
+ * <TR><TD>might-switch</TD>	<TD>The Cobalt kernel may switch context</TD></TR>
+ * </TABLE>
+ *
+ * @}
+ */
+++ linux-patched/kernel/xenomai/sched.c	2022-03-21 12:58:28.868894042 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/kernel/xenomai/bufd.c	1970-01-01 01:00:00.000000000 +0100
+/*
+ * Copyright (C) 2001-2013 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * Xenomai is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published
+ * by the Free Software Foundation; either version 2 of the License,
+ * or (at your option) any later version.
+ *
+ * Xenomai is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Xenomai; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA
+ * 02111-1307, USA.
+ */
+#include <linux/module.h>
+#include <linux/signal.h>
+#include <linux/wait.h>
+#include <linux/sched.h>
+#include <cobalt/kernel/sched.h>
+#include <cobalt/kernel/thread.h>
+#include <cobalt/kernel/timer.h>
+#include <cobalt/kernel/intr.h>
+#include <cobalt/kernel/heap.h>
+#include <cobalt/kernel/arith.h>
+#include <cobalt/uapi/signal.h>
+#include <pipeline/sched.h>
+#define CREATE_TRACE_POINTS
+#include <trace/events/cobalt-core.h>
+
+/**
+ * @ingroup cobalt_core
+ * @defgroup cobalt_core_sched Thread scheduling control
+ * @{
+ */
+
+DEFINE_PER_CPU(struct xnsched, nksched);
+EXPORT_PER_CPU_SYMBOL_GPL(nksched);
+
+cpumask_t cobalt_cpu_affinity = CPU_MASK_ALL;
+EXPORT_SYMBOL_GPL(cobalt_cpu_affinity);
+
+LIST_HEAD(nkthreadq);
+
+int cobalt_nrthreads;
+
+#ifdef CONFIG_XENO_OPT_VFILE
+struct xnvfile_rev_tag nkthreadlist_tag;
+#endif
+
+static struct xnsched_class *xnsched_class_highest;
+
+#define for_each_xnsched_class(p) \
+   for (p = xnsched_class_highest; p; p = p->next)
+
+static void xnsched_register_class(struct xnsched_class *sched_class)
+{
+	sched_class->next = xnsched_class_highest;
+	xnsched_class_highest = sched_class;
+
+	/*
+	 * Classes shall be registered by increasing priority order,
+	 * idle first and up.
+	 */
+	XENO_BUG_ON(COBALT, sched_class->next &&
+		   sched_class->next->weight > sched_class->weight);
+
+	printk(XENO_INFO "scheduling class %s registered.\n", sched_class->name);
+}
+
+void xnsched_register_classes(void)
+{
+	xnsched_register_class(&xnsched_class_idle);
+#ifdef CONFIG_XENO_OPT_SCHED_WEAK
+	xnsched_register_class(&xnsched_class_weak);
+#endif
+#ifdef CONFIG_XENO_OPT_SCHED_TP
+	xnsched_register_class(&xnsched_class_tp);
+#endif
+#ifdef CONFIG_XENO_OPT_SCHED_SPORADIC
+	xnsched_register_class(&xnsched_class_sporadic);
+#endif
+#ifdef CONFIG_XENO_OPT_SCHED_QUOTA
+	xnsched_register_class(&xnsched_class_quota);
+#endif
+	xnsched_register_class(&xnsched_class_rt);
+}
+
+#ifdef CONFIG_XENO_OPT_WATCHDOG
+
+static unsigned long wd_timeout_arg = CONFIG_XENO_OPT_WATCHDOG_TIMEOUT;
+module_param_named(watchdog_timeout, wd_timeout_arg, ulong, 0644);
+
+static inline xnticks_t get_watchdog_timeout(void)
+{
+	return wd_timeout_arg * 1000000000ULL;
+}
+
+/**
+ * @internal
+ * @fn void watchdog_handler(struct xntimer *timer)
+ * @brief Process watchdog ticks.
+ *
+ * This internal routine handles incoming watchdog triggers to detect
+ * software lockups. It forces the offending thread to stop
+ * monopolizing the CPU, either by kicking it out of primary mode if
+ * running in user space, or cancelling it if kernel-based.
+ *
+ * @coretags{coreirq-only, atomic-entry}
+ */
+static void watchdog_handler(struct xntimer *timer)
+{
+	struct xnsched *sched = xnsched_current();
+	struct xnthread *curr = sched->curr;
+
+	/*
+	 * CAUTION: The watchdog tick might have been delayed while we
+	 * were busy switching the CPU to secondary mode, so the trigger
+	 * date may already have passed by the time it fires. Make sure
+	 * that we are not about to kick the incoming root thread.
+	 */
+	if (xnthread_test_state(curr, XNROOT))
+		return;
+
+	trace_cobalt_watchdog_signal(curr);
+
+	if (xnthread_test_state(curr, XNUSER)) {
+		printk(XENO_WARNING "watchdog triggered on CPU #%d -- runaway thread "
+		       "'%s' signaled\n", xnsched_cpu(sched), curr->name);
+		xnthread_call_mayday(curr, SIGDEBUG_WATCHDOG);
+	} else {
+		printk(XENO_WARNING "watchdog triggered on CPU #%d -- runaway thread "
+		       "'%s' canceled\n", xnsched_cpu(sched), curr->name);
+		/*
+		 * On behalf of an IRQ handler, xnthread_cancel()
+		 * would only go halfway through cancelling the
+		 * preempted thread. Therefore we manually raise XNKICKED to
+		 * cause the next call to xnthread_suspend() to return
+		 * early in XNBREAK condition, and XNCANCELD so that
+		 * @thread exits next time it invokes
+		 * xnthread_test_cancel().
+		 */
+		xnthread_set_info(curr, XNKICKED|XNCANCELD);
+	}
+}
+
+#endif /* CONFIG_XENO_OPT_WATCHDOG */
+
+static void roundrobin_handler(struct xntimer *timer)
+{
+	struct xnsched *sched = container_of(timer, struct xnsched, rrbtimer);
+	xnsched_tick(sched);
+}
+
+static void xnsched_init(struct xnsched *sched, int cpu)
+{
+	char rrbtimer_name[XNOBJECT_NAME_LEN];
+	char htimer_name[XNOBJECT_NAME_LEN];
+	char root_name[XNOBJECT_NAME_LEN];
+	union xnsched_policy_param param;
+	struct xnthread_init_attr attr;
+	struct xnsched_class *p;
+
+#ifdef CONFIG_SMP
+	sched->cpu = cpu;
+	ksformat(htimer_name, sizeof(htimer_name), "[host-timer/%u]", cpu);
+	ksformat(rrbtimer_name, sizeof(rrbtimer_name), "[rrb-timer/%u]", cpu);
+	ksformat(root_name, sizeof(root_name), "ROOT/%u", cpu);
+	cpumask_clear(&sched->resched);
+#else
+	strcpy(htimer_name, "[host-timer]");
+	strcpy(rrbtimer_name, "[rrb-timer]");
+	strcpy(root_name, "ROOT");
+#endif
+	for_each_xnsched_class(p) {
+		if (p->sched_init)
+			p->sched_init(sched);
+	}
+
+	sched->status = 0;
+	sched->lflags = XNIDLE;
+	sched->inesting = 0;
+	sched->curr = &sched->rootcb;
+
+	attr.flags = XNROOT | XNFPU;
+	attr.name = root_name;
+	attr.personality = &xenomai_personality;
+	attr.affinity = *cpumask_of(cpu);
+	param.idle.prio = XNSCHED_IDLE_PRIO;
+
+	__xnthread_init(&sched->rootcb, &attr,
+			sched, &xnsched_class_idle, &param);
+
+	/*
+	 * No direct handler here since the host timer processing is
+	 * postponed to xnintr_irq_handler(), as part of the interrupt
+	 * exit code.
+	 */
+	xntimer_init(&sched->htimer, &nkclock, NULL,
+		     sched, XNTIMER_IGRAVITY);
+	xntimer_set_priority(&sched->htimer, XNTIMER_LOPRIO);
+	xntimer_set_name(&sched->htimer, htimer_name);
+	xntimer_init(&sched->rrbtimer, &nkclock, roundrobin_handler,
+		     sched, XNTIMER_IGRAVITY);
+	xntimer_set_name(&sched->rrbtimer, rrbtimer_name);
+	xntimer_set_priority(&sched->rrbtimer, XNTIMER_LOPRIO);
+
+	xnstat_exectime_set_current(sched, &sched->rootcb.stat.account);
+#ifdef CONFIG_XENO_ARCH_FPU
+	sched->fpuholder = &sched->rootcb;
+#endif /* CONFIG_XENO_ARCH_FPU */
+
+	pipeline_init_root_tcb(&sched->rootcb);
+	list_add_tail(&sched->rootcb.glink, &nkthreadq);
+	cobalt_nrthreads++;
+
+#ifdef CONFIG_XENO_OPT_WATCHDOG
+	xntimer_init(&sched->wdtimer, &nkclock, watchdog_handler,
+		     sched, XNTIMER_IGRAVITY);
+	xntimer_set_name(&sched->wdtimer, "[watchdog]");
+	xntimer_set_priority(&sched->wdtimer, XNTIMER_LOPRIO);
+#endif /* CONFIG_XENO_OPT_WATCHDOG */
+}
+
+void xnsched_init_all(void)
+{
+	struct xnsched *sched;
+	int cpu;
+
+	for_each_online_cpu(cpu) {
+		sched = &per_cpu(nksched, cpu);
+		xnsched_init(sched, cpu);
+	}
+
+	pipeline_request_resched_ipi(__xnsched_run_handler);
+}
+
+static void xnsched_destroy(struct xnsched *sched)
+{
+	xntimer_destroy(&sched->htimer);
+	xntimer_destroy(&sched->rrbtimer);
+	xntimer_destroy(&sched->rootcb.ptimer);
+	xntimer_destroy(&sched->rootcb.rtimer);
+#ifdef CONFIG_XENO_OPT_WATCHDOG
+	xntimer_destroy(&sched->wdtimer);
+#endif /* CONFIG_XENO_OPT_WATCHDOG */
+}
+
+void xnsched_destroy_all(void)
+{
+	struct xnthread *thread, *tmp;
+	struct xnsched *sched;
+	int cpu;
+	spl_t s;
+
+	pipeline_free_resched_ipi();
+
+	xnlock_get_irqsave(&nklock, s);
+
+	/* NOTE: &nkthreadq can't be empty (root thread(s)). */
+	list_for_each_entry_safe(thread, tmp, &nkthreadq, glink) {
+		if (!xnthread_test_state(thread, XNROOT))
+			xnthread_cancel(thread);
+	}
+
+	xnsched_run();
+
+	for_each_online_cpu(cpu) {
+		sched = xnsched_struct(cpu);
+		xnsched_destroy(sched);
+	}
+
+	xnlock_put_irqrestore(&nklock, s);
+}
+
+static inline void set_thread_running(struct xnsched *sched,
+				      struct xnthread *thread)
+{
+	xnthread_clear_state(thread, XNREADY);
+	if (xnthread_test_state(thread, XNRRB))
+		xntimer_start(&sched->rrbtimer,
+			      thread->rrperiod, XN_INFINITE, XN_RELATIVE);
+	else
+		xntimer_stop(&sched->rrbtimer);
+}
+
+/* Must be called with nklock locked, interrupts off. */
+struct xnthread *xnsched_pick_next(struct xnsched *sched)
+{
+	struct xnsched_class *p __maybe_unused;
+	struct xnthread *curr = sched->curr;
+	struct xnthread *thread;
+
+	if (!xnthread_test_state(curr, XNTHREAD_BLOCK_BITS | XNZOMBIE)) {
+		/*
+		 * Do not preempt the current thread if it holds the
+		 * scheduler lock.
+		 */
+		if (curr->lock_count > 0) {
+			xnsched_set_self_resched(sched);
+			return curr;
+		}
+		/*
+		 * Push the current thread back to the run queue of
+		 * the scheduling class it belongs to, if not yet
+		 * linked to it (XNREADY tells us if it is).
+		 */
+		if (!xnthread_test_state(curr, XNREADY)) {
+			xnsched_requeue(curr);
+			xnthread_set_state(curr, XNREADY);
+		}
+	}
+
+	/*
+	 * Find the runnable thread having the highest priority among
+	 * all scheduling classes, scanned by decreasing priority.
+	 */
+#ifdef CONFIG_XENO_OPT_SCHED_CLASSES
+	for_each_xnsched_class(p) {
+		thread = p->sched_pick(sched);
+		if (thread) {
+			set_thread_running(sched, thread);
+			return thread;
+		}
+	}
+
+	return NULL; /* Never executed because of the idle class. */
+#else /* !CONFIG_XENO_OPT_SCHED_CLASSES */
+	thread = xnsched_rt_pick(sched);
+	if (unlikely(thread == NULL))
+		thread = &sched->rootcb;
+
+	set_thread_running(sched, thread);
+
+	return thread;
+#endif /* CONFIG_XENO_OPT_SCHED_CLASSES */
+}
+
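+/*
+ * The scheduler lock disables preemption of the current thread by
+ * other Cobalt threads until xnsched_unlock() is called. Lock and
+ * unlock calls may nest; rescheduling resumes once the owner's
+ * nesting count drops back to zero.
+ */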
+void xnsched_lock(void)
+{
+	struct xnsched *sched = xnsched_current();
+	/* See comments in xnsched_run(), ___xnsched_run(). */
+	struct xnthread *curr = READ_ONCE(sched->curr);
+
+	if (sched->lflags & XNINIRQ)
+		return;
+
+	/*
+	 * CAUTION: The fast xnthread_current() accessor carries the
+	 * relevant lock nesting count only if current runs in primary
+	 * mode. Otherwise, if the caller is unknown or relaxed
+	 * Xenomai-wise, then we fall back to the root thread on the
+	 * current scheduler, which must be done with IRQs off.
+	 * Either way, we don't need to grab the super lock.
+	 */
+	XENO_WARN_ON_ONCE(COBALT, (curr->state & XNROOT) &&
+			  !hard_irqs_disabled());
+
+	curr->lock_count++;
+}
+EXPORT_SYMBOL_GPL(xnsched_lock);
+
+void xnsched_unlock(void)
+{
+	struct xnsched *sched = xnsched_current();
+	struct xnthread *curr = READ_ONCE(sched->curr);
+
+	XENO_WARN_ON_ONCE(COBALT, (curr->state & XNROOT) &&
+			  !hard_irqs_disabled());
+
+	if (sched->lflags & XNINIRQ)
+		return;
+
+	if (!XENO_ASSERT(COBALT, curr->lock_count > 0))
+		return;
+
+	if (--curr->lock_count == 0) {
+		xnthread_clear_localinfo(curr, XNLBALERT);
+		xnsched_run();
+	}
+}
+EXPORT_SYMBOL_GPL(xnsched_unlock);
+
+/* nklock locked, interrupts off. */
+void xnsched_putback(struct xnthread *thread)
+{
+	if (xnthread_test_state(thread, XNREADY))
+		xnsched_dequeue(thread);
+	else
+		xnthread_set_state(thread, XNREADY);
+
+	xnsched_enqueue(thread);
+	xnsched_set_resched(thread->sched);
+}
+
+/* nklock locked, interrupts off. */
+int xnsched_set_policy(struct xnthread *thread,
+		       struct xnsched_class *sched_class,
+		       const union xnsched_policy_param *p)
+{
+	struct xnsched_class *orig_effective_class __maybe_unused;
+	bool effective;
+	int ret;
+
+	ret = xnsched_chkparam(sched_class, thread, p);
+	if (ret)
+		return ret;
+
+	/*
+	 * Declaring a thread to a new scheduling class may fail, so
+	 * we do that early, while the thread is still a member of the
+	 * previous class. However, this also means that the
+	 * declaration callback shall not do anything that might
+	 * affect the previous class (such as touching thread->rlink
+	 * for instance).
+	 */
+	if (sched_class != thread->base_class) {
+		ret = xnsched_declare(sched_class, thread, p);
+		if (ret)
+			return ret;
+	}
+
+	/*
+	 * As a special case, we may be called from __xnthread_init()
+	 * with no previous scheduling class at all.
+	 */
+	if (likely(thread->base_class != NULL)) {
+		if (xnthread_test_state(thread, XNREADY))
+			xnsched_dequeue(thread);
+
+		if (sched_class != thread->base_class)
+			xnsched_forget(thread);
+	}
+
+	/*
+	 * Set the base and effective scheduling parameters. However,
+	 * xnsched_setparam() will deny lowering the effective
+	 * priority if a boost is undergoing, only recording the
+	 * change into the base priority field in such situation.
+	 */
+	thread->base_class = sched_class;
+	/*
+	 * Referring to the effective class from a setparam() handler
+	 * is wrong: make sure to break if so.
+	 */
+	if (XENO_DEBUG(COBALT)) {
+		orig_effective_class = thread->sched_class;
+		thread->sched_class = NULL;
+	}
+
+	/*
+	 * This is the ONLY place where calling xnsched_setparam() is
+	 * legit, sane and safe.
+	 */
+	effective = xnsched_setparam(thread, p);
+	if (effective) {
+		thread->sched_class = sched_class;
+		thread->wprio = xnsched_calc_wprio(sched_class, thread->cprio);
+	} else if (XENO_DEBUG(COBALT))
+		thread->sched_class = orig_effective_class;
+
+	if (xnthread_test_state(thread, XNREADY))
+		xnsched_enqueue(thread);
+
+	/*
+	 * Make sure not to raise XNSCHED when setting up the root
+	 * thread, so that we can't start rescheduling on interrupt
+	 * exit before all CPUs have their runqueue fully
+	 * built. Filtering on XNROOT here is correct because the root
+	 * thread enters the idle class once as part of the runqueue
+	 * setup process and never leaves it afterwards.
+	 */
+	if (!xnthread_test_state(thread, XNDORMANT|XNROOT))
+		xnsched_set_resched(thread->sched);
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(xnsched_set_policy);
+
+/* nklock locked, interrupts off. */
+bool xnsched_set_effective_priority(struct xnthread *thread, int prio)
+{
+	int wprio = xnsched_calc_wprio(thread->base_class, prio);
+
+	thread->bprio = prio;
+	if (wprio == thread->wprio)
+		return true;
+
+	/*
+	 * We may not lower the effective/current priority of a
+	 * boosted thread when changing the base scheduling
+	 * parameters. Only xnsched_track_policy() and
+	 * xnsched_protect_priority() may do so when dealing with PI
+	 * and PP synchs resp.
+	 */
+	if (wprio < thread->wprio && xnthread_test_state(thread, XNBOOST))
+		return false;
+
+	thread->cprio = prio;
+
+	trace_cobalt_thread_set_current_prio(thread);
+
+	return true;
+}
+
+/* nklock locked, interrupts off. */
+void xnsched_track_policy(struct xnthread *thread,
+			  struct xnthread *target)
+{
+	union xnsched_policy_param param;
+
+	/*
+	 * Inherit (or reset) the effective scheduling class and
+	 * priority of a thread. Unlike xnsched_set_policy(), this
+	 * routine is allowed to lower the weighted priority with no
+	 * restriction, even if a boost is undergoing.
+	 */
+	if (xnthread_test_state(thread, XNREADY))
+		xnsched_dequeue(thread);
+	/*
+	 * Self-targeting means to reset the scheduling policy and
+	 * parameters to the base settings. Otherwise, make thread
+	 * inherit the scheduling parameters from target.
+	 */
+	if (target == thread) {
+		thread->sched_class = thread->base_class;
+		xnsched_trackprio(thread, NULL);
+		/*
+		 * Per SuSv2, resetting the base scheduling parameters
+		 * should not move the thread to the tail of its
+		 * priority group.
+		 */
+		if (xnthread_test_state(thread, XNREADY))
+			xnsched_requeue(thread);
+
+	} else {
+		xnsched_getparam(target, &param);
+		thread->sched_class = target->sched_class;
+		xnsched_trackprio(thread, &param);
+		if (xnthread_test_state(thread, XNREADY))
+			xnsched_enqueue(thread);
+	}
+
+	trace_cobalt_thread_set_current_prio(thread);
+
+	xnsched_set_resched(thread->sched);
+}
+
+/* nklock locked, interrupts off. */
+void xnsched_protect_priority(struct xnthread *thread, int prio)
+{
+	/*
+	 * Apply a PP boost by changing the effective priority of a
+	 * thread, forcing it to the RT class. Like
+	 * xnsched_track_policy(), this routine is allowed to lower
+	 * the weighted priority with no restriction, even if a boost
+	 * is undergoing.
+	 *
+	 * This routine only deals with active boosts, resetting the
+	 * base priority when leaving a PP boost is obtained by a call
+	 * to xnsched_track_policy().
+	 */
+	if (xnthread_test_state(thread, XNREADY))
+		xnsched_dequeue(thread);
+
+	thread->sched_class = &xnsched_class_rt;
+	xnsched_protectprio(thread, prio);
+
+	if (xnthread_test_state(thread, XNREADY))
+		xnsched_enqueue(thread);
+
+	trace_cobalt_thread_set_current_prio(thread);
+
+	xnsched_set_resched(thread->sched);
+}
+
+static void migrate_thread(struct xnthread *thread, struct xnsched *sched)
+{
+	struct xnsched_class *sched_class = thread->sched_class;
+
+	if (xnthread_test_state(thread, XNREADY)) {
+		xnsched_dequeue(thread);
+		xnthread_clear_state(thread, XNREADY);
+	}
+
+	if (sched_class->sched_migrate)
+		sched_class->sched_migrate(thread, sched);
+	/*
+	 * WARNING: the scheduling class may have just changed as a
+	 * result of calling the per-class migration hook.
+	 */
+	thread->sched = sched;
+}
+
+/*
+ * nklock locked, interrupts off. thread must be runnable.
+ */
+void xnsched_migrate(struct xnthread *thread, struct xnsched *sched)
+{
+	xnsched_set_resched(thread->sched);
+	migrate_thread(thread, sched);
+	/* Move thread to the remote run queue. */
+	xnsched_putback(thread);
+}
+
+/*
+ * nklock locked, interrupts off. Thread may be blocked.
+ */
+void xnsched_migrate_passive(struct xnthread *thread, struct xnsched *sched)
+{
+	struct xnsched *last_sched = thread->sched;
+
+	migrate_thread(thread, sched);
+
+	if (!xnthread_test_state(thread, XNTHREAD_BLOCK_BITS)) {
+		xnsched_requeue(thread);
+		xnthread_set_state(thread, XNREADY);
+		xnsched_set_resched(last_sched);
+	}
+}
+
+#ifdef CONFIG_XENO_OPT_SCALABLE_SCHED
+
+void xnsched_initq(struct xnsched_mlq *q)
+{
+	int prio;
+
+	q->elems = 0;
+	bitmap_zero(q->prio_map, XNSCHED_MLQ_LEVELS);
+
+	for (prio = 0; prio < XNSCHED_MLQ_LEVELS; prio++)
+		INIT_LIST_HEAD(q->heads + prio);
+}
+
+static inline int get_qindex(struct xnsched_mlq *q, int prio)
+{
+	XENO_BUG_ON(COBALT, prio < 0 || prio >= XNSCHED_MLQ_LEVELS);
+	/*
+	 * BIG FAT WARNING: We need to rescale the priority level to a
+	 * 0-based range. We use find_first_bit() to scan the bitmap
+	 * which is a bit scan forward operation. Therefore, the lower
+	 * the index value, the higher the priority (since least
+	 * significant bits will be found first when scanning the
+	 * bitmap).
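+	 * For instance, priority (XNSCHED_MLQ_LEVELS - 1) maps to
+	 * index 0 (scanned first), while priority 0 maps to index
+	 * (XNSCHED_MLQ_LEVELS - 1).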
+	 */
+	return XNSCHED_MLQ_LEVELS - prio - 1;
+}
+
+static struct list_head *add_q(struct xnsched_mlq *q, int prio)
+{
+	struct list_head *head;
+	int idx;
+
+	idx = get_qindex(q, prio);
+	head = q->heads + idx;
+	q->elems++;
+
+	/* New item is not linked yet. */
+	if (list_empty(head))
+		__set_bit(idx, q->prio_map);
+
+	return head;
+}
+
+void xnsched_addq(struct xnsched_mlq *q, struct xnthread *thread)
+{
+	struct list_head *head = add_q(q, thread->cprio);
+	list_add(&thread->rlink, head);
+}
+
+void xnsched_addq_tail(struct xnsched_mlq *q, struct xnthread *thread)
+{
+	struct list_head *head = add_q(q, thread->cprio);
+	list_add_tail(&thread->rlink, head);
+}
+
+static void del_q(struct xnsched_mlq *q,
+		  struct list_head *entry, int idx)
+{
+	struct list_head *head = q->heads + idx;
+
+	list_del(entry);
+	q->elems--;
+
+	if (list_empty(head))
+		__clear_bit(idx, q->prio_map);
+}
+
+void xnsched_delq(struct xnsched_mlq *q, struct xnthread *thread)
+{
+	del_q(q, &thread->rlink, get_qindex(q, thread->cprio));
+}
+
+struct xnthread *xnsched_getq(struct xnsched_mlq *q)
+{
+	struct xnthread *thread;
+	struct list_head *head;
+	int idx;
+
+	if (q->elems == 0)
+		return NULL;
+
+	idx = xnsched_weightq(q);
+	head = q->heads + idx;
+	XENO_BUG_ON(COBALT, list_empty(head));
+	thread = list_first_entry(head, struct xnthread, rlink);
+	del_q(q, &thread->rlink, idx);
+
+	return thread;
+}
+
+struct xnthread *xnsched_findq(struct xnsched_mlq *q, int prio)
+{
+	struct list_head *head;
+	int idx;
+
+	idx = get_qindex(q, prio);
+	head = q->heads + idx;
+	if (list_empty(head))
+		return NULL;
+
+	return list_first_entry(head, struct xnthread, rlink);
+}
+
+#ifdef CONFIG_XENO_OPT_SCHED_CLASSES
+
+struct xnthread *xnsched_rt_pick(struct xnsched *sched)
+{
+	struct xnsched_mlq *q = &sched->rt.runnable;
+	struct xnthread *thread;
+	struct list_head *head;
+	int idx;
+
+	if (q->elems == 0)
+		return NULL;
+
+	/*
+	 * Some scheduling policies may be implemented as variants of
+	 * the core SCHED_FIFO class, sharing its runqueue
+	 * (e.g. SCHED_SPORADIC, SCHED_QUOTA). This means that we have
+	 * to do some cascading to call the right pick handler
+	 * eventually.
+	 */
+	idx = xnsched_weightq(q);
+	head = q->heads + idx;
+	XENO_BUG_ON(COBALT, list_empty(head));
+
+	/*
+	 * The active class (i.e. ->sched_class) is the one currently
+	 * queuing the thread, reflecting any priority boost due to
+	 * PI.
+	 */
+	thread = list_first_entry(head, struct xnthread, rlink);
+	if (unlikely(thread->sched_class != &xnsched_class_rt))
+		return thread->sched_class->sched_pick(sched);
+
+	del_q(q, &thread->rlink, idx);
+
+	return thread;
+}
+
+#endif /* CONFIG_XENO_OPT_SCHED_CLASSES */
+
+#else /* !CONFIG_XENO_OPT_SCALABLE_SCHED */
+
+struct xnthread *xnsched_findq(struct list_head *q, int prio)
+{
+	struct xnthread *thread;
+
+	if (list_empty(q))
+		return NULL;
+
+	/* Find thread leading a priority group. */
+	list_for_each_entry(thread, q, rlink) {
+		if (prio == thread->cprio)
+			return thread;
+	}
+
+	return NULL;
+}
+
+#ifdef CONFIG_XENO_OPT_SCHED_CLASSES
+
+struct xnthread *xnsched_rt_pick(struct xnsched *sched)
+{
+	struct list_head *q = &sched->rt.runnable;
+	struct xnthread *thread;
+
+	if (list_empty(q))
+		return NULL;
+
+	thread = list_first_entry(q, struct xnthread, rlink);
+	if (unlikely(thread->sched_class != &xnsched_class_rt))
+		return thread->sched_class->sched_pick(sched);
+
+	list_del(&thread->rlink);
+
+	return thread;
+}
+
+#endif /* CONFIG_XENO_OPT_SCHED_CLASSES */
+
+#endif /* !CONFIG_XENO_OPT_SCALABLE_SCHED */
+
+/**
+ * @fn int xnsched_run(void)
+ * @brief The rescheduling procedure.
+ *
+ * This is the central rescheduling routine which should be called to
+ * validate and apply changes which have previously been made to the
+ * nucleus scheduling state, such as suspending, resuming or changing
+ * the priority of threads.  This call performs context switches as
+ * needed. xnsched_run() schedules out the current thread if:
+ *
+ * - the current thread is about to block.
+ * - a runnable thread from a higher priority scheduling class is
+ * waiting for the CPU.
+ * - the current thread does not lead the runnable threads from its
+ * own scheduling class (i.e. round-robin).
+ *
+ * The Cobalt core implements a lazy rescheduling scheme so that most
+ * of the services affecting the thread state MUST be followed by a
+ * call to the rescheduling procedure for the new scheduling state to
+ * be applied.
+ *
+ * In other words, multiple changes on the scheduler state can be done
+ * in a row, waking threads up, blocking others, without being
+ * immediately translated into the corresponding context switches.
+ * When all changes have been applied, xnsched_run() should be called
+ * for considering those changes, and possibly switching context.
+ *
+ * As a notable exception to the previous principle however, every
+ * action which ends up suspending the current thread begets an
+ * implicit call to the rescheduling procedure on behalf of the
+ * blocking service.
+ *
+ * Typically, self-suspension or sleeping on a synchronization object
+ * automatically leads to a call to the rescheduling procedure,
+ * therefore the caller does not need to explicitly issue
+ * xnsched_run() after such operations.
+ *
+ * The rescheduling procedure always leads to a null-effect if it is
+ * called on behalf of an interrupt service routine. Any outstanding
+ * scheduler lock held by the outgoing thread will be restored when
+ * the thread is scheduled back in.
+ *
+ * Calling this procedure with no applicable context switch pending is
+ * harmless and simply leads to a null-effect.
+ *
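+ * As an illustration only (a sketch, not part of this interface),
+ * several scheduler state changes may be batched under the nklock
+ * and committed by a single call to the rescheduling procedure:
+ *
+ * @code
+ *   void wake_pair(struct xnthread *t1, struct xnthread *t2)
+ *   {
+ *       spl_t s;
+ *
+ *       xnlock_get_irqsave(&nklock, s);
+ *       xnthread_resume(t1, XNDELAY);
+ *       xnthread_resume(t2, XNDELAY);
+ *       xnsched_run();
+ *       xnlock_put_irqrestore(&nklock, s);
+ *   }
+ * @endcode
+ *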
+ * @return Non-zero is returned if a context switch actually happened,
+ * otherwise zero if the current thread was left running.
+ *
+ * @coretags{unrestricted}
+ */
+static inline int test_resched(struct xnsched *sched)
+{
+	int resched = xnsched_resched_p(sched);
+#ifdef CONFIG_SMP
+	/* Send resched IPI to remote CPU(s). */
+	if (unlikely(!cpumask_empty(&sched->resched))) {
+		smp_mb();
+		pipeline_send_resched_ipi(&sched->resched);
+		cpumask_clear(&sched->resched);
+	}
+#endif
+	sched->status &= ~XNRESCHED;
+
+	return resched;
+}
+
+static inline void enter_root(struct xnthread *root)
+{
+#ifdef CONFIG_XENO_OPT_WATCHDOG
+	xntimer_stop(&root->sched->wdtimer);
+#endif
+}
+
+static inline void leave_root(struct xnthread *root)
+{
+	pipeline_prep_switch_oob(root);
+
+#ifdef CONFIG_XENO_OPT_WATCHDOG
+	xntimer_start(&root->sched->wdtimer, get_watchdog_timeout(),
+		      XN_INFINITE, XN_RELATIVE);
+#endif
+}
+
+void __xnsched_run_handler(void) /* hw interrupts off. */
+{
+	trace_cobalt_schedule_remote(xnsched_current());
+	xnsched_run();
+}
+
+static inline void do_lazy_user_work(struct xnthread *curr)
+{
+	xnthread_commit_ceiling(curr);
+}
+
+int ___xnsched_run(struct xnsched *sched)
+{
+	bool switched = false, leaving_inband;
+	struct xnthread *prev, *next, *curr;
+	spl_t s;
+
+	XENO_WARN_ON_ONCE(COBALT, is_secondary_domain());
+
+	trace_cobalt_schedule(sched);
+
+	xnlock_get_irqsave(&nklock, s);
+
+	curr = sched->curr;
+	/*
+	 * CAUTION: xnthread_host_task(curr) may be unsynced and even
+	 * stale if curr = &rootcb, since the task logged by
+	 * leave_root() may not still be the current one. Use
+	 * "current" for disambiguating.
+	 */
+	xntrace_pid(task_pid_nr(current), xnthread_current_priority(curr));
+
+	if (xnthread_test_state(curr, XNUSER))
+		do_lazy_user_work(curr);
+
+	if (!test_resched(sched))
+		goto out;
+
+	next = xnsched_pick_next(sched);
+	if (next == curr) {
+		if (unlikely(xnthread_test_state(next, XNROOT))) {
+			if (sched->lflags & XNHTICK)
+				xnintr_host_tick(sched);
+			if (sched->lflags & XNHDEFER)
+				xnclock_program_shot(&nkclock, sched);
+		}
+		goto out;
+	}
+
+	prev = curr;
+
+	trace_cobalt_switch_context(prev, next);
+
+	/*
+	 * sched->curr is shared locklessly with xnsched_run() and
+	 * xnsched_lock(). WRITE_ONCE() makes sure sched->curr is
+	 * written atomically so that these routines always observe
+	 * consistent values by preventing the compiler from using
+	 * store tearing.
+	 */
+	WRITE_ONCE(sched->curr, next);
+	leaving_inband = false;
+
+	if (xnthread_test_state(prev, XNROOT)) {
+		leave_root(prev);
+		leaving_inband = true;
+	} else if (xnthread_test_state(next, XNROOT)) {
+		if (sched->lflags & XNHTICK)
+			xnintr_host_tick(sched);
+		if (sched->lflags & XNHDEFER)
+			xnclock_program_shot(&nkclock, sched);
+		enter_root(next);
+	}
+
+	xnstat_exectime_switch(sched, &next->stat.account);
+	xnstat_counter_inc(&next->stat.csw);
+
+	if (pipeline_switch_to(prev, next, leaving_inband))
+		/* oob -> in-band transition detected. */
+		return true;
+
+	/*
+	 * Re-read sched->curr for tracing: the current thread may
+	 * have switched from in-band to oob context.
+	 */
+	xntrace_pid(task_pid_nr(current),
+		xnthread_current_priority(xnsched_current()->curr));
+
+	switched = true;
+out:
+	xnlock_put_irqrestore(&nklock, s);
+
+	return !!switched;
+}
+EXPORT_SYMBOL_GPL(___xnsched_run);
+
+#ifdef CONFIG_XENO_OPT_VFILE
+
+static struct xnvfile_directory sched_vfroot;
+
+struct vfile_schedlist_priv {
+	struct xnthread *curr;
+	xnticks_t start_time;
+};
+
+struct vfile_schedlist_data {
+	int cpu;
+	pid_t pid;
+	char name[XNOBJECT_NAME_LEN];
+	char sched_class[XNOBJECT_NAME_LEN];
+	char personality[XNOBJECT_NAME_LEN];
+	int cprio;
+	xnticks_t timeout;
+	int state;
+};
+
+static struct xnvfile_snapshot_ops vfile_schedlist_ops;
+
+static struct xnvfile_snapshot schedlist_vfile = {
+	.privsz = sizeof(struct vfile_schedlist_priv),
+	.datasz = sizeof(struct vfile_schedlist_data),
+	.tag = &nkthreadlist_tag,
+	.ops = &vfile_schedlist_ops,
+};
+
+static int vfile_schedlist_rewind(struct xnvfile_snapshot_iterator *it)
+{
+	struct vfile_schedlist_priv *priv = xnvfile_iterator_priv(it);
+
+	/* &nkthreadq cannot be empty (root thread(s)). */
+	priv->curr = list_first_entry(&nkthreadq, struct xnthread, glink);
+	priv->start_time = xnclock_read_monotonic(&nkclock);
+
+	return cobalt_nrthreads;
+}
+
+static int vfile_schedlist_next(struct xnvfile_snapshot_iterator *it,
+				void *data)
+{
+	struct vfile_schedlist_priv *priv = xnvfile_iterator_priv(it);
+	struct vfile_schedlist_data *p = data;
+	xnticks_t timeout, period;
+	struct xnthread *thread;
+	xnticks_t base_time;
+
+	if (priv->curr == NULL)
+		return 0;	/* All done. */
+
+	thread = priv->curr;
+	if (list_is_last(&thread->glink, &nkthreadq))
+		priv->curr = NULL;
+	else
+		priv->curr = list_next_entry(thread, glink);
+
+	p->cpu = xnsched_cpu(thread->sched);
+	p->pid = xnthread_host_pid(thread);
+	memcpy(p->name, thread->name, sizeof(p->name));
+	p->cprio = thread->cprio;
+	p->state = xnthread_get_state(thread);
+	if (thread->lock_count > 0)
+		p->state |= XNLOCK;
+	knamecpy(p->sched_class, thread->sched_class->name);
+	knamecpy(p->personality, thread->personality->name);
+	period = xnthread_get_period(thread);
+	base_time = priv->start_time;
+	if (xntimer_clock(&thread->ptimer) != &nkclock)
+		base_time = xnclock_read_monotonic(xntimer_clock(&thread->ptimer));
+	timeout = xnthread_get_timeout(thread, base_time);
+	/*
+	 * Here we cheat: thread is periodic and the sampling rate may
+	 * be high, so it is indeed possible that the next tick date
+	 * from the ptimer progresses fast enough while we are busy
+	 * collecting output data in this loop, so that next_date -
+	 * start_time > period. In such a case, we simply ceil the
+	 * value to period to keep the result meaningful, even if not
+	 * necessarily accurate. But what does accuracy mean when the
+	 * sampling frequency is high, and the way to read it has to
+	 * go through the vfile interface anyway?
+	 */
+	if (period > 0 && period < timeout &&
+	    !xntimer_running_p(&thread->rtimer))
+		timeout = period;
+
+	p->timeout = timeout;
+
+	return 1;
+}
+
+static int vfile_schedlist_show(struct xnvfile_snapshot_iterator *it,
+				void *data)
+{
+	struct vfile_schedlist_data *p = data;
+	char sbuf[64], pbuf[16], tbuf[16];
+
+	if (p == NULL)
+		xnvfile_printf(it,
+			       "%-3s  %-6s %-5s  %-8s  %-5s %-12s  %-10s %s\n",
+			       "CPU", "PID", "CLASS", "TYPE", "PRI", "TIMEOUT",
+			       "STAT", "NAME");
+	else {
+		ksformat(pbuf, sizeof(pbuf), "%3d", p->cprio);
+		xntimer_format_time(p->timeout, tbuf, sizeof(tbuf));
+		xnthread_format_status(p->state, sbuf, sizeof(sbuf));
+
+		xnvfile_printf(it,
+			       "%3u  %-6d %-5s  %-8s  %-5s %-12s  %-10s %s%s%s\n",
+			       p->cpu,
+			       p->pid,
+			       p->sched_class,
+			       p->personality,
+			       pbuf,
+			       tbuf,
+			       sbuf,
+			       (p->state & XNUSER) ? "" : "[",
+			       p->name,
+			       (p->state & XNUSER) ? "" : "]");
+	}
+
+	return 0;
+}
+
+static struct xnvfile_snapshot_ops vfile_schedlist_ops = {
+	.rewind = vfile_schedlist_rewind,
+	.next = vfile_schedlist_next,
+	.show = vfile_schedlist_show,
+};
+
+#ifdef CONFIG_XENO_OPT_STATS
+
+static spl_t vfile_schedstat_lock_s;
+
+static int vfile_schedstat_get_lock(struct xnvfile *vfile)
+{
+	int ret;
+
+	ret = xnintr_get_query_lock();
+	if (ret < 0)
+		return ret;
+	xnlock_get_irqsave(&nklock, vfile_schedstat_lock_s);
+	return 0;
+}
+
+static void vfile_schedstat_put_lock(struct xnvfile *vfile)
+{
+	xnlock_put_irqrestore(&nklock, vfile_schedstat_lock_s);
+	xnintr_put_query_lock();
+}
+
+static struct xnvfile_lock_ops vfile_schedstat_lockops = {
+	.get = vfile_schedstat_get_lock,
+	.put = vfile_schedstat_put_lock,
+};
+
+struct vfile_schedstat_priv {
+	int irq;
+	struct xnthread *curr;
+	struct xnintr_iterator intr_it;
+};
+
+struct vfile_schedstat_data {
+	int cpu;
+	pid_t pid;
+	int state;
+	char name[XNOBJECT_NAME_LEN];
+	unsigned long ssw;
+	unsigned long csw;
+	unsigned long xsc;
+	unsigned long pf;
+	xnticks_t exectime_period;
+	xnticks_t account_period;
+	xnticks_t exectime_total;
+	struct xnsched_class *sched_class;
+	xnticks_t period;
+	int cprio;
+};
+
+static struct xnvfile_snapshot_ops vfile_schedstat_ops;
+
+static struct xnvfile_snapshot schedstat_vfile = {
+	.privsz = sizeof(struct vfile_schedstat_priv),
+	.datasz = sizeof(struct vfile_schedstat_data),
+	.tag = &nkthreadlist_tag,
+	.ops = &vfile_schedstat_ops,
+	.entry = { .lockops = &vfile_schedstat_lockops },
+};
+
+static int vfile_schedstat_rewind(struct xnvfile_snapshot_iterator *it)
+{
+	struct vfile_schedstat_priv *priv = xnvfile_iterator_priv(it);
+	int irqnr;
+
+	/*
+	 * The activity numbers on each valid interrupt descriptor are
+	 * grouped under a pseudo-thread.
+	 */
+	priv->curr = list_first_entry(&nkthreadq, struct xnthread, glink);
+	priv->irq = 0;
+	irqnr = xnintr_query_init(&priv->intr_it) * num_online_cpus();
+
+	return irqnr + cobalt_nrthreads;
+}
+
+static int vfile_schedstat_next(struct xnvfile_snapshot_iterator *it,
+				void *data)
+{
+	struct vfile_schedstat_priv *priv = xnvfile_iterator_priv(it);
+	struct vfile_schedstat_data *p = data;
+	struct xnthread *thread;
+	struct xnsched *sched;
+	xnticks_t period;
+	int __maybe_unused ret;
+
+	if (priv->curr == NULL)
+		/*
+		 * We are done with actual threads, scan interrupt
+		 * descriptors.
+		 */
+		goto scan_irqs;
+
+	thread = priv->curr;
+	if (list_is_last(&thread->glink, &nkthreadq))
+		priv->curr = NULL;
+	else
+		priv->curr = list_next_entry(thread, glink);
+
+	sched = thread->sched;
+	p->cpu = xnsched_cpu(sched);
+	p->pid = xnthread_host_pid(thread);
+	memcpy(p->name, thread->name, sizeof(p->name));
+	p->state = xnthread_get_state(thread);
+	if (thread->lock_count > 0)
+		p->state |= XNLOCK;
+	p->ssw = xnstat_counter_get(&thread->stat.ssw);
+	p->csw = xnstat_counter_get(&thread->stat.csw);
+	p->xsc = xnstat_counter_get(&thread->stat.xsc);
+	p->pf = xnstat_counter_get(&thread->stat.pf);
+	p->sched_class = thread->sched_class;
+	p->cprio = thread->cprio;
+	p->period = xnthread_get_period(thread);
+
+	period = sched->last_account_switch - thread->stat.lastperiod.start;
+	if (period == 0 && thread == sched->curr) {
+		p->exectime_period = 1;
+		p->account_period = 1;
+	} else {
+		p->exectime_period = thread->stat.account.total -
+			thread->stat.lastperiod.total;
+		p->account_period = period;
+	}
+	p->exectime_total = thread->stat.account.total;
+	thread->stat.lastperiod.total = thread->stat.account.total;
+	thread->stat.lastperiod.start = sched->last_account_switch;
+
+	return 1;
+
+scan_irqs:
+#ifdef CONFIG_XENO_OPT_STATS_IRQS
+	if (priv->irq >= PIPELINE_NR_IRQS)
+		return 0;	/* All done. */
+
+	ret = xnintr_query_next(priv->irq, &priv->intr_it, p->name);
+	if (ret) {
+		if (ret == -EAGAIN)
+			xnvfile_touch(it->vfile); /* force rewind. */
+		priv->irq++;
+		return VFILE_SEQ_SKIP;
+	}
+
+	if (!xnsched_supported_cpu(priv->intr_it.cpu))
+		return VFILE_SEQ_SKIP;
+
+	p->cpu = priv->intr_it.cpu;
+	p->csw = priv->intr_it.hits;
+	p->exectime_period = priv->intr_it.exectime_period;
+	p->account_period = priv->intr_it.account_period;
+	p->exectime_total = priv->intr_it.exectime_total;
+	p->pid = 0;
+	p->state = 0;
+	p->ssw = 0;
+	p->xsc = 0;
+	p->pf = 0;
+	p->sched_class = &xnsched_class_idle;
+	p->cprio = 0;
+	p->period = 0;
+
+	return 1;
+#else /* !CONFIG_XENO_OPT_STATS_IRQS */
+	return 0;
+#endif /* !CONFIG_XENO_OPT_STATS_IRQS */
+}
+
+static int vfile_schedstat_show(struct xnvfile_snapshot_iterator *it,
+				void *data)
+{
+	struct vfile_schedstat_data *p = data;
+	int usage = 0;
+
+	if (p == NULL)
+		xnvfile_printf(it,
+			       "%-3s  %-6s %-10s %-10s %-10s %-4s  %-8s  %5s"
+			       "  %s\n",
+			       "CPU", "PID", "MSW", "CSW", "XSC", "PF", "STAT", "%CPU",
+			       "NAME");
+	else {
+		if (p->account_period) {
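+			/*
+			 * Scale both terms down until the divisor fits
+			 * in 32 bits, then compute the CPU share in
+			 * tenths of a percent, rounding to nearest.
+			 */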
+			while (p->account_period > 0xffffffffUL) {
+				p->exectime_period >>= 16;
+				p->account_period >>= 16;
+			}
+			usage = xnarch_ulldiv(p->exectime_period * 1000LL +
+					      (p->account_period >> 1),
+					      p->account_period, NULL);
+		}
+		xnvfile_printf(it,
+			       "%3u  %-6d %-10lu %-10lu %-10lu %-4lu  %.8x  %3u.%u"
+			       "  %s%s%s\n",
+			       p->cpu, p->pid, p->ssw, p->csw, p->xsc, p->pf, p->state,
+			       usage / 10, usage % 10,
+			       (p->state & XNUSER) ? "" : "[",
+			       p->name,
+			       (p->state & XNUSER) ? "" : "]");
+	}
+
+	return 0;
+}
+
+static int vfile_schedacct_show(struct xnvfile_snapshot_iterator *it,
+				void *data)
+{
+	struct vfile_schedstat_data *p = data;
+
+	if (p == NULL)
+		return 0;
+
+	xnvfile_printf(it, "%u %d %lu %lu %lu %lu %.8x %Lu %Lu %Lu %s %s %d %Lu\n",
+		       p->cpu, p->pid, p->ssw, p->csw, p->xsc, p->pf, p->state,
+		       xnclock_ticks_to_ns(&nkclock, p->account_period),
+		       xnclock_ticks_to_ns(&nkclock, p->exectime_period),
+		       xnclock_ticks_to_ns(&nkclock, p->exectime_total),
+		       p->name,
+		       p->sched_class->name,
+		       p->cprio,
+		       p->period);
+
+	return 0;
+}
+
+static struct xnvfile_snapshot_ops vfile_schedstat_ops = {
+	.rewind = vfile_schedstat_rewind,
+	.next = vfile_schedstat_next,
+	.show = vfile_schedstat_show,
+};
+
+/*
+ * An accounting vfile is a thread statistics vfile in disguise with a
+ * different output format, which is parser-friendly.
+ */
+static struct xnvfile_snapshot_ops vfile_schedacct_ops;
+
+static struct xnvfile_snapshot schedacct_vfile = {
+	.privsz = sizeof(struct vfile_schedstat_priv),
+	.datasz = sizeof(struct vfile_schedstat_data),
+	.tag = &nkthreadlist_tag,
+	.ops = &vfile_schedacct_ops,
+};
+
+static struct xnvfile_snapshot_ops vfile_schedacct_ops = {
+	.rewind = vfile_schedstat_rewind,
+	.next = vfile_schedstat_next,
+	.show = vfile_schedacct_show,
+};
+
+#endif /* CONFIG_XENO_OPT_STATS */
+
+#ifdef CONFIG_SMP
+
+static int affinity_vfile_show(struct xnvfile_regular_iterator *it,
+			       void *data)
+{
+	unsigned long val = 0;
+	int cpu;
+
+	for (cpu = 0; cpu < BITS_PER_LONG; cpu++)
+		if (cpumask_test_cpu(cpu, &cobalt_cpu_affinity))
+			val |= (1UL << cpu);
+
+	xnvfile_printf(it, "%08lx\n", val);
+
+	return 0;
+}
+
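+/*
+ * Writing an integer CPU mask to this vfile narrows the default
+ * affinity applied when new Cobalt threads are created. For instance
+ * (assuming the Cobalt vfile root is mounted at /proc/xenomai),
+ * "echo 3 > /proc/xenomai/affinity" would restrict it to CPU0-1,
+ * while writing 0 resets it to the full real-time CPU set.
+ */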
+static ssize_t affinity_vfile_store(struct xnvfile_input *input)
+{
+	cpumask_t affinity;
+	ssize_t ret;
+	long val;
+	int cpu;
+	spl_t s;
+
+	ret = xnvfile_get_integer(input, &val);
+	if (ret < 0)
+		return ret;
+
+	if (val == 0)
+		affinity = xnsched_realtime_cpus; /* Reset to default. */
+	else {
+		cpumask_clear(&affinity);
+		for (cpu = 0; cpu < BITS_PER_LONG; cpu++, val >>= 1) {
+			if (val & 1) {
+				/*
+				 * The new dynamic affinity must be a strict
+				 * subset of the static set of supported CPUs.
+				 */
+				if (!cpumask_test_cpu(cpu,
+						      &xnsched_realtime_cpus))
+					return -EINVAL;
+				cpumask_set_cpu(cpu, &affinity);
+			}
+		}
+	}
+
+	cpumask_and(&affinity, &affinity, cpu_online_mask);
+	if (cpumask_empty(&affinity))
+		return -EINVAL;
+
+	xnlock_get_irqsave(&nklock, s);
+	cobalt_cpu_affinity = affinity;
+	xnlock_put_irqrestore(&nklock, s);
+
+	return ret;
+}
+
+static struct xnvfile_regular_ops affinity_vfile_ops = {
+	.show = affinity_vfile_show,
+	.store = affinity_vfile_store,
+};
+
+static struct xnvfile_regular affinity_vfile = {
+	.ops = &affinity_vfile_ops,
+};
+
+#endif /* CONFIG_SMP */
+
+int xnsched_init_proc(void)
+{
+	struct xnsched_class *p;
+	int ret;
+
+	ret = xnvfile_init_dir("sched", &sched_vfroot, &cobalt_vfroot);
+	if (ret)
+		return ret;
+
+	ret = xnvfile_init_snapshot("threads", &schedlist_vfile, &sched_vfroot);
+	if (ret)
+		return ret;
+
+	for_each_xnsched_class(p) {
+		if (p->sched_init_vfile) {
+			ret = p->sched_init_vfile(p, &sched_vfroot);
+			if (ret)
+				return ret;
+		}
+	}
+
+#ifdef CONFIG_XENO_OPT_STATS
+	ret = xnvfile_init_snapshot("stat", &schedstat_vfile, &sched_vfroot);
+	if (ret)
+		return ret;
+	ret = xnvfile_init_snapshot("acct", &schedacct_vfile, &sched_vfroot);
+	if (ret)
+		return ret;
+#endif /* CONFIG_XENO_OPT_STATS */
+
+#ifdef CONFIG_SMP
+	xnvfile_init_regular("affinity", &affinity_vfile, &cobalt_vfroot);
+#endif /* CONFIG_SMP */
+
+	return 0;
+}
+
+void xnsched_cleanup_proc(void)
+{
+	struct xnsched_class *p;
+
+	for_each_xnsched_class(p) {
+		if (p->sched_cleanup_vfile)
+			p->sched_cleanup_vfile(p);
+	}
+
+#ifdef CONFIG_SMP
+	xnvfile_destroy_regular(&affinity_vfile);
+#endif /* CONFIG_SMP */
+#ifdef CONFIG_XENO_OPT_STATS
+	xnvfile_destroy_snapshot(&schedacct_vfile);
+	xnvfile_destroy_snapshot(&schedstat_vfile);
+#endif /* CONFIG_XENO_OPT_STATS */
+	xnvfile_destroy_snapshot(&schedlist_vfile);
+	xnvfile_destroy_dir(&sched_vfroot);
+}
+
+#endif /* CONFIG_XENO_OPT_VFILE */
+
+/** @} */
+++ linux-patched/kernel/xenomai/bufd.c	2022-03-21 12:58:28.865894071 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/kernel/xenomai/synch.c	1970-01-01 01:00:00.000000000 +0100
+/*
+ * Copyright (C) 2009 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * Xenomai is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * Xenomai is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Xenomai; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA
+ * 02111-1307, USA.
+ */
+#include <cobalt/kernel/heap.h>
+#include <cobalt/kernel/sched.h>
+#include <cobalt/kernel/bufd.h>
+#include <cobalt/kernel/assert.h>
+#include <asm/xenomai/syscall.h>
+
+/**
+ * @ingroup cobalt_core
+ * @defgroup cobalt_core_bufd Buffer descriptor
+ *
+ * Abstraction for copying data to/from different address spaces
+ *
+ * A buffer descriptor is a simple abstraction dealing with copy
+ * operations to/from memory buffers which may belong to different
+ * address spaces.
+ *
+ * To this end, the buffer descriptor library provides a small set of
+ * copy routines which are aware of address space restrictions when
+ * moving data, and a generic container type which can hold a
+ * reference to - or cover - a particular memory area, either present
+ * in kernel space, or in any of the existing user memory contexts.
+ *
+ * The goal of the buffer descriptor abstraction is to hide address
+ * space specifics from Xenomai services dealing with memory areas,
+ * allowing them to operate on multiple address spaces seamlessly.
+ *
+ * The common usage patterns are as follows:
+ *
+ * - Implementing a Xenomai syscall returning a bulk of data to the
+ *   caller, which may have to be copied back to either kernel or user
+ *   space:
+ *
+ *   @code
+ *   [Syscall implementation]
+ *   ssize_t rt_bulk_read_inner(struct xnbufd *bufd)
+ *   {
+ *       ssize_t ret;
+ *       size_t len;
+ *       void *bulk;
+ *
+ *       bulk = get_next_readable_bulk(&len);
+ *       ret = xnbufd_copy_from_kmem(bufd, bulk, min(bufd->b_len, len));
+ *       free_bulk(bulk);
+ *
+ *       ret = this_may_fail();
+ *       if (ret)
+ *	       xnbufd_invalidate(bufd);
+ *
+ *       return ret;
+ *   }
+ *
+ *   [Kernel wrapper for in-kernel calls]
+ *   int rt_bulk_read(void *ptr, size_t len)
+ *   {
+ *       struct xnbufd bufd;
+ *       ssize_t ret;
+ *
+ *       xnbufd_map_kwrite(&bufd, ptr, len);
+ *       ret = rt_bulk_read_inner(&bufd);
+ *       xnbufd_unmap_kwrite(&bufd);
+ *
+ *       return ret;
+ *   }
+ *
+ *   [Userland trampoline for user syscalls]
+ *   int __rt_bulk_read(struct pt_regs *regs)
+ *   {
+ *       struct xnbufd bufd;
+ *       void __user *ptr;
+ *       ssize_t ret;
+ *       size_t len;
+ *
+ *       ptr = (void __user *)__xn_reg_arg1(regs);
+ *       len = __xn_reg_arg2(regs);
+ *
+ *       xnbufd_map_uwrite(&bufd, ptr, len);
+ *       ret = rt_bulk_read_inner(&bufd);
+ *       xnbufd_unmap_uwrite(&bufd);
+ *
+ *       return ret;
+ *   }
+ *   @endcode
+ *
+ * - Implementing a Xenomai syscall receiving a bulk of data from the
+ *   caller, which may have to be read from either kernel or user
+ *   space:
+ *
+ *   @code
+ *   [Syscall implementation]
+ *   ssize_t rt_bulk_write_inner(struct xnbufd *bufd)
+ *   {
+ *       void *bulk = get_free_bulk(bufd->b_len);
+ *       return xnbufd_copy_to_kmem(bulk, bufd, bufd->b_len);
+ *   }
+ *
+ *   [Kernel wrapper for in-kernel calls]
+ *   int rt_bulk_write(const void *ptr, size_t len)
+ *   {
+ *       struct xnbufd bufd;
+ *       ssize_t ret;
+ *
+ *       xnbufd_map_kread(&bufd, ptr, len);
+ *       ret = rt_bulk_write_inner(&bufd);
+ *       xnbufd_unmap_kread(&bufd);
+ *
+ *       return ret;
+ *   }
+ *
+ *   [Userland trampoline for user syscalls]
+ *   int __rt_bulk_write(struct pt_regs *regs)
+ *   {
+ *       struct xnbufd bufd;
+ *       void __user *ptr;
+ *       ssize_t ret;
+ *       size_t len;
+ *
+ *       ptr = (void __user *)__xn_reg_arg1(regs);
+ *       len = __xn_reg_arg2(regs);
+ *
+ *       xnbufd_map_uread(&bufd, ptr, len);
+ *       ret = rt_bulk_write_inner(&bufd);
+ *       xnbufd_unmap_uread(&bufd);
+ *
+ *       return ret;
+ *   }
+ *   @endcode
+ *
+ *@{*/
+
+/**
+ * @fn void xnbufd_map_kread(struct xnbufd *bufd, const void *ptr, size_t len)
+ * @brief Initialize a buffer descriptor for reading from kernel memory.
+ *
+ * The new buffer descriptor may be used to copy data from kernel
+ * memory. This routine should be used in pair with
+ * xnbufd_unmap_kread().
+ *
+ * @param bufd The address of the buffer descriptor which will map a
+ * @a len bytes kernel memory area, starting from @a ptr.
+ *
+ * @param ptr The start of the kernel buffer to map.
+ *
+ * @param len The length of the kernel buffer starting at @a ptr.
+ *
+ * @coretags{unrestricted}
+ */
+
+/**
+ * @fn void xnbufd_map_kwrite(struct xnbufd *bufd, void *ptr, size_t len)
+ * @brief Initialize a buffer descriptor for writing to kernel memory.
+ *
+ * The new buffer descriptor may be used to copy data to kernel
+ * memory. This routine should be used in pair with
+ * xnbufd_unmap_kwrite().
+ *
+ * @param bufd The address of the buffer descriptor which will map a
+ * @a len bytes kernel memory area, starting from @a ptr.
+ *
+ * @param ptr The start of the kernel buffer to map.
+ *
+ * @param len The length of the kernel buffer starting at @a ptr.
+ *
+ * @coretags{unrestricted}
+ */
+void xnbufd_map_kmem(struct xnbufd *bufd, void *ptr, size_t len)
+{
+	bufd->b_ptr = ptr;
+	bufd->b_len = len;
+	bufd->b_mm = NULL;
+	bufd->b_off = 0;
+	bufd->b_carry = NULL;
+}
+EXPORT_SYMBOL_GPL(xnbufd_map_kmem);
+
+/**
+ * @fn void xnbufd_map_uread(struct xnbufd *bufd, const void __user *ptr, size_t len)
+ * @brief Initialize a buffer descriptor for reading from user memory.
+ *
+ * The new buffer descriptor may be used to copy data from user
+ * memory. This routine should be used in pair with
+ * xnbufd_unmap_uread().
+ *
+ * @param bufd The address of the buffer descriptor which will map a
+ * @a len bytes user memory area, starting from @a ptr. @a ptr is
+ * never dereferenced directly, since it may refer to a buffer that
+ * lives in another address space.
+ *
+ * @param ptr The start of the user buffer to map.
+ *
+ * @param len The length of the user buffer starting at @a ptr.
+ *
+ * @coretags{task-unrestricted}
+ */
+
+/**
+ * @fn void xnbufd_map_uwrite(struct xnbufd *bufd, void __user *ptr, size_t len)
+ * @brief Initialize a buffer descriptor for writing to user memory.
+ *
+ * The new buffer descriptor may be used to copy data to user
+ * memory. This routine should be used in pair with
+ * xnbufd_unmap_uwrite().
+ *
+ * @param bufd The address of the buffer descriptor which will map a
+ * @a len bytes user memory area, starting from @a ptr. @a ptr is
+ * never dereferenced directly, since it may refer to a buffer that
+ * lives in another address space.
+ *
+ * @param ptr The start of the user buffer to map.
+ *
+ * @param len The length of the user buffer starting at @a ptr.
+ *
+ * @coretags{task-unrestricted}
+ */
+
+void xnbufd_map_umem(struct xnbufd *bufd, void __user *ptr, size_t len)
+{
+	bufd->b_ptr = ptr;
+	bufd->b_len = len;
+	bufd->b_mm = current->mm;
+	bufd->b_off = 0;
+	bufd->b_carry = NULL;
+}
+EXPORT_SYMBOL_GPL(xnbufd_map_umem);
+
+/**
+ * @fn ssize_t xnbufd_copy_to_kmem(void *to, struct xnbufd *bufd, size_t len)
+ * @brief Copy memory covered by a buffer descriptor to kernel memory.
+ *
+ * This routine copies @a len bytes from the area referred to by the
+ * buffer descriptor @a bufd to the kernel memory area @a to.
+ * xnbufd_copy_to_kmem() tracks the read offset within the source
+ * memory internally, so that it may be called several times in a
+ * loop, until the entire memory area is loaded.
+ *
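+ * As a minimal sketch (the 64-byte chunk size and the function shown
+ * are illustrative only), draining a descriptor into a kernel buffer
+ * chunk by chunk may look like this:
+ *
+ * @code
+ *   ssize_t drain_bufd(struct xnbufd *bufd, char *dst, size_t total)
+ *   {
+ *       size_t done = 0, n;
+ *       ssize_t ret;
+ *
+ *       while (done < total) {
+ *           n = min_t(size_t, total - done, 64);
+ *           ret = xnbufd_copy_to_kmem(dst + done, bufd, n);
+ *           if (ret < 0)
+ *               return ret;
+ *           done += n;
+ *       }
+ *
+ *       return (ssize_t)done;
+ *   }
+ * @endcode
+ *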
+ * The source address space is dealt with according to the following
+ * rules:
+ *
+ * - if @a bufd refers to readable kernel area (i.e. see
+ *   xnbufd_map_kread()), the copy is immediately and fully performed
+ *   with no restriction.
+ *
+ * - if @a bufd refers to a readable user area (i.e. see
+ *   xnbufd_map_uread()), the copy is performed only if that area
+ *   lives in the currently active address space, and only if the
+ *   caller may sleep Linux-wise to process any potential page fault
+ *   which may arise while reading from that memory.
+ *
+ * - any attempt to read from @a bufd from a non-suitable context is
+ *   considered as a bug, and will raise a panic assertion when the
+ *   nucleus is compiled in debug mode.
+ *
+ * @param to The start address of the kernel memory to copy to.
+ *
+ * @param bufd The address of the buffer descriptor covering the user
+ * memory to copy data from.
+ *
+ * @param len The length of the user memory to copy from @a bufd.
+ *
+ * @return The number of bytes read so far from the memory area
+ * covered by @a bufd. Otherwise:
+ *
+ * - -EINVAL is returned upon attempt to read from the user area from
+ *   an invalid context. This error is only returned when the debug
+ *   mode is disabled; otherwise a panic assertion is raised.
+ *
+ * @coretags{task-unrestricted}
+ *
+ * @note Calling this routine while holding the nklock and/or running
+ * with interrupts disabled is invalid, and doing so will trigger a
+ * debug assertion.
+ *
+ * This routine may switch the caller to secondary mode if a page
+ * fault occurs while reading from the user area. For that reason,
+ * xnbufd_copy_to_kmem() may only be called from a preemptible section
+ * (Linux-wise).
+ */
+ssize_t xnbufd_copy_to_kmem(void *to, struct xnbufd *bufd, size_t len)
+{
+	caddr_t from;
+
+	thread_only();
+
+	if (len == 0)
+		goto out;
+
+	from = bufd->b_ptr + bufd->b_off;
+
+	/*
+	 * If the descriptor covers a source buffer living in the
+	 * kernel address space, we may read from it directly.
+	 */
+	if (bufd->b_mm == NULL) {
+		memcpy(to, from, len);
+		goto advance_offset;
+	}
+
+	/*
+	 * We want to read data from user-space, check whether:
+	 * 1) the source buffer lies in the current address space,
+	 * 2) we may fault while reading from the buffer directly.
+	 *
+	 * If we can't reach the buffer, or the current context may
+	 * not fault while reading data from it, copy_from_user() is
+	 * not an option and we have a bug somewhere, since there is
+	 * no way we could fetch the data to kernel space immediately.
+	 *
+	 * Note that we don't check for non-preemptible Linux context
+	 * here, since the source buffer would live in kernel space in
+	 * such a case.
+	 */
+	if (current->mm == bufd->b_mm) {
+		preemptible_only();
+		if (cobalt_copy_from_user(to, (void __user *)from, len))
+			return -EFAULT;
+		goto advance_offset;
+	}
+
+	XENO_BUG(COBALT);
+
+	return -EINVAL;
+
+advance_offset:
+	bufd->b_off += len;
+out:
+	return (ssize_t)bufd->b_off;
+}
+EXPORT_SYMBOL_GPL(xnbufd_copy_to_kmem);
+
+/**
+ * @fn ssize_t xnbufd_copy_from_kmem(struct xnbufd *bufd, void *from, size_t len)
+ * @brief Copy kernel memory to the area covered by a buffer descriptor.
+ *
+ * This routine copies @a len bytes from the kernel memory starting at
+ * @a from to the area referred to by the buffer descriptor @a
+ * bufd. xnbufd_copy_from_kmem() tracks the write offset within the
+ * destination memory internally, so that it may be called several
+ * times in a loop, until the entire memory area is stored.
+ *
+ * The destination address space is dealt with according to the
+ * following rules:
+ *
+ * - if @a bufd refers to a writable kernel area (i.e. see
+ *   xnbufd_map_kwrite()), the copy is immediately and fully performed
+ *   with no restriction.
+ *
+ * - if @a bufd refers to a writable user area (i.e. see
+ *   xnbufd_map_uwrite()), the copy is performed only if that area
+ *   lives in the currently active address space, and only if the
+ *   caller may sleep Linux-wise to process any potential page fault
+ *   which may arise while writing to that memory.
+ *
+ * - if @a bufd refers to a user area which may not be immediately
+ *   written to from the current context, the copy is postponed until
+ *   xnbufd_unmap_uwrite() is invoked for @a bufd, at which point the
+ *   copy will take place. In such a case, the source memory is
+ *   transferred to a carry over buffer allocated internally; this
+ *   operation may lead to requesting dynamic memory from the nucleus
+ *   heap if @a len is greater than 64 bytes.
+ *
+ * @param bufd The address of the buffer descriptor covering the user
+ * memory to copy data to.
+ *
+ * @param from The start address of the kernel memory to copy from.
+ *
+ * @param len The length of the kernel memory to copy to @a bufd.
+ *
+ * @return The number of bytes written so far to the memory area
+ * covered by @a bufd. Otherwise,
+ *
+ * - -ENOMEM is returned when no memory is available from the nucleus
+ *    heap to allocate the carry over buffer.
+ *
+ * @coretags{unrestricted}
+ *
+ * @note Calling this routine while holding the nklock and/or running
+ * with interrupts disabled is invalid, and doing so will trigger a
+ * debug assertion.
+ *
+ * This routine may switch the caller to secondary mode if a page
+ * fault occurs while writing to the user area. For that reason,
+ * xnbufd_copy_from_kmem() may only be called from a preemptible section
+ * (Linux-wise).
+ */
+ssize_t xnbufd_copy_from_kmem(struct xnbufd *bufd, void *from, size_t len)
+{
+	caddr_t to;
+
+	thread_only();
+
+	if (len == 0)
+		goto out;
+
+	to = bufd->b_ptr + bufd->b_off;
+
+	/*
+	 * If the descriptor covers a destination buffer living in the
+	 * kernel address space, we may copy to it directly.
+	 */
+	if (bufd->b_mm == NULL)
+		goto direct_copy;
+
+	/*
+	 * We want to pass data to user-space, check whether:
+	 * 1) the destination buffer lies in the current address space,
+	 * 2) we may fault while writing to the buffer directly.
+	 *
+	 * If we can't reach the buffer, or the current context may
+	 * not fault while copying data to it, copy_to_user() is not
+	 * an option and we have to convey the data from kernel memory
+	 * through the carry over buffer.
+	 *
+	 * Note that we don't check for non-preemptible Linux context
+	 * here: feeding a RT activity with data from a non-RT context
+	 * is wrong in the first place, so never mind.
+	 */
+	if (current->mm == bufd->b_mm) {
+		preemptible_only();
+		if (cobalt_copy_to_user((void __user *)to, from, len))
+			return -EFAULT;
+		goto advance_offset;
+	}
+
+	/*
+	 * We need a carry over buffer to convey the data to
+	 * user-space. xnbufd_unmap_uwrite() should be called on the
+	 * way back to user-space to update the destination buffer
+	 * from the carry over area.
+	 */
+	if (bufd->b_carry == NULL) {
+		/*
+		 * Try to use the fast carry over area available
+		 * directly from the descriptor for short messages, to
+		 * save a dynamic allocation request.
+		 */
+		if (bufd->b_len <= sizeof(bufd->b_buf))
+			bufd->b_carry = bufd->b_buf;
+		else {
+			bufd->b_carry = xnmalloc(bufd->b_len);
+			if (bufd->b_carry == NULL)
+				return -ENOMEM;
+		}
+		to = bufd->b_carry;
+	} else
+		to = bufd->b_carry + bufd->b_off;
+
+direct_copy:
+	memcpy(to, from, len);
+
+advance_offset:
+	bufd->b_off += len;
+out:
+	return (ssize_t)bufd->b_off;
+}
+EXPORT_SYMBOL_GPL(xnbufd_copy_from_kmem);
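+
+/*
+ * Illustrative sketch (not part of the original sources): a typical
+ * writeback sequence over a user area, assuming the usual
+ * xnbufd_map_uwrite(bufd, ptr, len) initializer mentioned above to
+ * set up the descriptor.
+ *
+ *	ssize_t send_reply(void __user *u_buf, size_t len,
+ *			   const void *reply, size_t reply_len)
+ *	{
+ *		struct xnbufd bufd;
+ *		ssize_t ret;
+ *
+ *		xnbufd_map_uwrite(&bufd, u_buf, len);
+ *		ret = xnbufd_copy_from_kmem(&bufd, (void *)reply, reply_len);
+ *		if (ret < 0)
+ *			xnbufd_invalidate(&bufd);
+ *		xnbufd_unmap_uwrite(&bufd);
+ *
+ *		return ret;
+ *	}
+ */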
+
+/**
+ * @fn void xnbufd_unmap_uread(struct xnbufd *bufd)
+ * @brief Finalize a buffer descriptor obtained from xnbufd_map_uread().
+ *
+ * This routine finalizes a buffer descriptor previously initialized
+ * by a call to xnbufd_map_uread(), to read data from a user area.
+ *
+ * @param bufd The address of the buffer descriptor to finalize.
+ *
+ * @return The number of bytes read so far from the memory area
+ * covered by @a bufd.
+ *
+ * @coretags{task-unrestricted}
+ *
+ * @note Calling this routine while holding the nklock and/or running
+ * with interrupts disabled is invalid, and doing so will trigger a
+ * debug assertion.
+ */
+ssize_t xnbufd_unmap_uread(struct xnbufd *bufd)
+{
+	preemptible_only();
+
+#ifdef CONFIG_XENO_OPT_DEBUG_COBALT
+	bufd->b_ptr = (caddr_t)-1;
+#endif
+	return bufd->b_off;
+}
+EXPORT_SYMBOL_GPL(xnbufd_unmap_uread);
+
+/**
+ * @fn void xnbufd_unmap_uwrite(struct xnbufd *bufd)
+ * @brief Finalize a buffer descriptor obtained from xnbufd_map_uwrite().
+ *
+ * This routine finalizes a buffer descriptor previously initialized
+ * by a call to xnbufd_map_uwrite(), to write data to a user area.
+ *
+ * The main action taken is to write the contents of the kernel memory
+ * area passed to xnbufd_copy_from_kmem() whenever the copy operation
+ * was postponed at that time; the carry over buffer is eventually
+ * released as needed. If xnbufd_copy_from_kmem() was allowed to copy
+ * to the destination user memory at once, then xnbufd_unmap_uwrite()
+ * leads to a no-op.
+ *
+ * @param bufd The address of the buffer descriptor to finalize.
+ *
+ * @return The number of bytes written so far to the memory area
+ * covered by @a bufd.
+ *
+ * @coretags{task-unrestricted}
+ *
+ * @note Calling this routine while holding the nklock and/or running
+ * with interrupts disabled is invalid, and doing so will trigger a
+ * debug assertion.
+ */
+ssize_t xnbufd_unmap_uwrite(struct xnbufd *bufd)
+{
+	ssize_t ret = 0;
+	void __user *to;
+	void *from;
+	size_t len;
+
+	preemptible_only();
+
+	len = bufd->b_off;
+
+	if (bufd->b_carry == NULL)
+		/* Copy took place directly. Fine. */
+		goto done;
+
+	/*
+	 * Something was written to the carry over area, copy the
+	 * contents to user-space, then release the area if needed.
+	 */
+	to = (void __user *)bufd->b_ptr;
+	from = bufd->b_carry;
+	ret = cobalt_copy_to_user(to, from, len);
+
+	if (bufd->b_len > sizeof(bufd->b_buf))
+		xnfree(bufd->b_carry);
+done:
+#ifdef CONFIG_XENO_OPT_DEBUG_COBALT
+	bufd->b_ptr = (caddr_t)-1;
+#endif
+	return ret ?: (ssize_t)len;
+}
+EXPORT_SYMBOL_GPL(xnbufd_unmap_uwrite);
+
+/**
+ * @fn void xnbufd_reset(struct xnbufd *bufd)
+ * @brief Reset a buffer descriptor.
+ *
+ * The buffer descriptor is reset, so that all data already copied is
+ * forgotten. Any carry over buffer allocated is kept, though.
+ *
+ * @param bufd The address of the buffer descriptor to reset.
+ *
+ * @coretags{unrestricted}
+ */
+
+/**
+ * @fn void xnbufd_invalidate(struct xnbufd *bufd)
+ * @brief Invalidate a buffer descriptor.
+ *
+ * The buffer descriptor is invalidated, making it unusable for
+ * further copy operations. If an outstanding carry over buffer was
+ * allocated by a previous call to xnbufd_copy_from_kmem(), it is
+ * immediately freed so that no data transfer will happen when the
+ * descriptor is finalized.
+ *
+ * The only action that may subsequently be performed on an
+ * invalidated descriptor is calling the relevant unmapping routine
+ * for it. For that reason, xnbufd_invalidate() should be invoked on
+ * the error path when data may have been transferred to the carry
+ * over buffer.
+ *
+ * @param bufd The address of the buffer descriptor to invalidate.
+ *
+ * @coretags{unrestricted}
+ */
+void xnbufd_invalidate(struct xnbufd *bufd)
+{
+#ifdef CONFIG_XENO_OPT_DEBUG_COBALT
+	bufd->b_ptr = (caddr_t)-1;
+#endif
+	if (bufd->b_carry) {
+		if (bufd->b_len > sizeof(bufd->b_buf))
+			xnfree(bufd->b_carry);
+		bufd->b_carry = NULL;
+	}
+	bufd->b_off = 0;
+}
+EXPORT_SYMBOL_GPL(xnbufd_invalidate);
+
+/**
+ * @fn void xnbufd_unmap_kread(struct xnbufd *bufd)
+ * @brief Finalize a buffer descriptor obtained from xnbufd_map_kread().
+ *
+ * This routine finalizes a buffer descriptor previously initialized
+ * by a call to xnbufd_map_kread(), to read data from a kernel area.
+ *
+ * @param bufd The address of the buffer descriptor to finalize.
+ *
+ * @return The number of bytes read so far from the memory area
+ * covered by @a bufd.
+ *
+ * @coretags{task-unrestricted}
+ */
+ssize_t xnbufd_unmap_kread(struct xnbufd *bufd)
+{
+#ifdef CONFIG_XENO_OPT_DEBUG_COBALT
+	bufd->b_ptr = (caddr_t)-1;
+#endif
+	return bufd->b_off;
+}
+EXPORT_SYMBOL_GPL(xnbufd_unmap_kread);
+
+/**
+ * @fn void xnbufd_unmap_kwrite(struct xnbufd *bufd)
+ * @brief Finalize a buffer descriptor obtained from xnbufd_map_kwrite().
+ *
+ * This routine finalizes a buffer descriptor previously initialized
+ * by a call to xnbufd_map_kwrite(), to write data to a kernel area.
+ *
+ * @param bufd The address of the buffer descriptor to finalize.
+ *
+ * @return The number of bytes written so far to the memory area
+ * covered by @a bufd.
+ *
+ * @coretags{task-unrestricted}
+ */
+ssize_t xnbufd_unmap_kwrite(struct xnbufd *bufd)
+{
+#ifdef CONFIG_XENO_OPT_DEBUG_COBALT
+	bufd->b_ptr = (caddr_t)-1;
+#endif
+	return bufd->b_off;
+}
+EXPORT_SYMBOL_GPL(xnbufd_unmap_kwrite);
+
+/** @} */
+++ linux-patched/kernel/xenomai/synch.c	2022-03-21 12:58:28.861894110 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/kernel/xenomai/select.c	1970-01-01 01:00:00.000000000 +0100
+/*
+ * Copyright (C) 2001-2008 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * Xenomai is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published
+ * by the Free Software Foundation; either version 2 of the License,
+ * or (at your option) any later version.
+ *
+ * Xenomai is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Xenomai; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA
+ * 02111-1307, USA.
+ */
+#include <linux/stdarg.h>
+#include <linux/signal.h>
+#include <cobalt/kernel/sched.h>
+#include <cobalt/kernel/synch.h>
+#include <cobalt/kernel/thread.h>
+#include <cobalt/kernel/clock.h>
+#include <cobalt/uapi/signal.h>
+#include <trace/events/cobalt-core.h>
+
+#define PP_CEILING_MASK 0xff
+
+static inline int get_ceiling_value(struct xnsynch *synch)
+{
+	/*
+	 * The ceiling priority value is stored in user-writable
+	 * memory, make sure to constrain it within valid bounds for
+	 * xnsched_class_rt before using it.
+	 */
+	return *synch->ceiling_ref & PP_CEILING_MASK ?: 1;
+}
+
+struct xnsynch *lookup_lazy_pp(xnhandle_t handle);
+
+/**
+ * @ingroup cobalt_core
+ * @defgroup cobalt_core_synch Thread synchronization services
+ * @{
+ */
+
+/**
+ * @brief Initialize a synchronization object.
+ *
+ * Initializes a synchronization object. Xenomai threads can wait on
+ * and signal such objects for serializing access to resources.
+ * This object has built-in support for priority inheritance.
+ *
+ * @param synch The address of a synchronization object descriptor
+ * Cobalt will use to store the object-specific data.  This descriptor
+ * must always be valid while the object is active; therefore, it must
+ * be allocated in permanent memory.
+ *
+ * @param flags A set of creation flags affecting the operation. The
+ * valid flags are:
+ *
+ * - XNSYNCH_PRIO causes the threads waiting for the resource to pend
+ * in priority order. Otherwise, FIFO ordering is used (XNSYNCH_FIFO).
+ *
+ * - XNSYNCH_OWNER indicates that the synchronization object shall
+ * track the resource ownership, allowing a single owner at most at
+ * any point in time. Note that setting this flag implies the use of
+ * xnsynch_acquire() and xnsynch_release() instead of
+ * xnsynch_sleep_on() and xnsynch_wakeup_*().
+ *
+ * - XNSYNCH_PI enables priority inheritance when a priority inversion
+ * is detected among threads using this object.  XNSYNCH_PI implies
+ * XNSYNCH_OWNER and XNSYNCH_PRIO.
+ *
+ * - XNSYNCH_PP enables priority protect to prevent priority inversion.
+ * XNSYNCH_PP implies XNSYNCH_OWNER and XNSYNCH_PRIO.
+ *
+ * - XNSYNCH_DREORD (Disable REORDering) tells Cobalt not to reorder
+ * the wait list upon priority change of a waiter. Reordering is the
+ * default. Only applies when XNSYNCH_PRIO is present.
+ *
+ * @param fastlock Address of the fast lock word to be associated with
+ * a synchronization object with ownership tracking. Therefore, a
+ * valid fast-lock address is required if XNSYNCH_OWNER is set in @a
+ * flags.
+ *
+ * @coretags{task-unrestricted}
+ */
+void xnsynch_init(struct xnsynch *synch, int flags, atomic_t *fastlock)
+{
+	if (flags & (XNSYNCH_PI|XNSYNCH_PP))
+		flags |= XNSYNCH_PRIO | XNSYNCH_OWNER;	/* Obviously... */
+
+	synch->status = flags & ~XNSYNCH_CLAIMED;
+	synch->owner = NULL;
+	synch->cleanup = NULL;	/* for PI/PP only. */
+	synch->wprio = -1;
+	synch->ceiling_ref = NULL;
+	INIT_LIST_HEAD(&synch->pendq);
+
+	if (flags & XNSYNCH_OWNER) {
+		BUG_ON(fastlock == NULL);
+		synch->fastlock = fastlock;
+		atomic_set(fastlock, XN_NO_HANDLE);
+	} else
+		synch->fastlock = NULL;
+}
+EXPORT_SYMBOL_GPL(xnsynch_init);
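+
+/*
+ * Illustrative sketch (not part of the original sources): initializing
+ * a mutex-like object with priority inheritance next to an ownerless,
+ * FIFO-ordered wait object. The fast lock word is only required when
+ * XNSYNCH_OWNER is implied, as documented above.
+ *
+ *	static struct xnsynch mutex_synch, event_synch;
+ *	static atomic_t mutex_fastlock;
+ *
+ *	void init_objects(void)
+ *	{
+ *		xnsynch_init(&mutex_synch, XNSYNCH_PI, &mutex_fastlock);
+ *		xnsynch_init(&event_synch, XNSYNCH_FIFO, NULL);
+ *	}
+ */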
+
+/**
+ * @brief Initialize a synchronization object enforcing PP.
+ *
+ * This call is a variant of xnsynch_init() for initializing
+ * synchronization objects enabling the priority protect protocol.
+ *
+ * @param synch The address of a synchronization object descriptor
+ * Cobalt will use to store the object-specific data.  See
+ * xnsynch_init().
+ *
+ * @param flags A set of creation flags affecting the operation. See
+ * xnsynch_init(). XNSYNCH_PI is mutually exclusive with XNSYNCH_PP,
+ * and won't be considered.
+ *
+ * @param fastlock Address of the fast lock word to be associated with
+ * a synchronization object with ownership tracking. See xnsynch_init().
+ *
+ * @param ceiling_ref The address of the variable holding the current
+ * priority ceiling value for this object.
+ *
+ * @coretags{task-unrestricted}
+ */
+void xnsynch_init_protect(struct xnsynch *synch, int flags,
+			  atomic_t *fastlock, u32 *ceiling_ref)
+{
+	xnsynch_init(synch, (flags & ~XNSYNCH_PI) | XNSYNCH_PP, fastlock);
+	synch->ceiling_ref = ceiling_ref;
+}
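+
+/*
+ * Illustrative sketch (not part of the original sources): initializing
+ * a priority-protected object. The ceiling variable is shown as a
+ * kernel-resident u32 for simplicity; in practice it is expected to
+ * live in memory shared with userland, which is why get_ceiling_value()
+ * clamps it before use.
+ *
+ *	static struct xnsynch pp_synch;
+ *	static atomic_t pp_fastlock;
+ *	static u32 pp_ceiling = 42;
+ *
+ *	void init_pp_object(void)
+ *	{
+ *		xnsynch_init_protect(&pp_synch, XNSYNCH_PRIO,
+ *				     &pp_fastlock, &pp_ceiling);
+ *	}
+ */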
+
+/**
+ * @fn void xnsynch_destroy(struct xnsynch *synch)
+ * @brief Destroy a synchronization object.
+ *
+ * Destroys the synchronization object @a synch, unblocking all
+ * waiters with the XNRMID status.
+ *
+ * @return XNSYNCH_RESCHED is returned if at least one thread is
+ * unblocked, which means the caller should invoke xnsched_run() for
+ * applying the new scheduling state. Otherwise, XNSYNCH_DONE is
+ * returned.
+ *
+ * @sideeffect Same as xnsynch_flush().
+ *
+ * @coretags{task-unrestricted}
+ */
+int xnsynch_destroy(struct xnsynch *synch)
+{
+	int ret;
+
+	ret = xnsynch_flush(synch, XNRMID);
+	XENO_BUG_ON(COBALT, synch->status & XNSYNCH_CLAIMED);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(xnsynch_destroy);
+
+/**
+ * @fn int xnsynch_sleep_on(struct xnsynch *synch, xnticks_t timeout, xntmode_t timeout_mode);
+ * @brief Sleep on an ownerless synchronization object.
+ *
+ * Makes the calling thread sleep on the specified synchronization
+ * object, waiting for it to be signaled.
+ *
+ * This service should be called by upper interfaces wanting the
+ * current thread to pend on the given resource. It must not be used
+ * with synchronization objects that are supposed to track ownership
+ * (XNSYNCH_OWNER).
+ *
+ * @param synch The descriptor address of the synchronization object
+ * to sleep on.
+ *
+ * @param timeout The timeout which may be used to limit the time the
+ * thread pends on the resource. This value is a wait time given as a
+ * count of nanoseconds. It can either be relative, absolute
+ * monotonic, or absolute adjustable depending on @a
+ * timeout_mode. Passing XN_INFINITE @b and setting @a timeout_mode to
+ * XN_RELATIVE specifies an unbounded wait. All other values are used
+ * to initialize a watchdog timer.
+ *
+ * @param timeout_mode The mode of the @a timeout parameter. It can
+ * either be set to XN_RELATIVE, XN_ABSOLUTE, or XN_REALTIME (see also
+ * xntimer_start()).
+ *
+ * @return A bitmask which may include zero or one information bit
+ * among XNRMID, XNTIMEO and XNBREAK, which should be tested by the
+ * caller, for detecting respectively: object deletion, timeout or
+ * signal/unblock conditions which might have happened while waiting.
+ *
+ * @coretags{primary-only, might-switch}
+ */
+int xnsynch_sleep_on(struct xnsynch *synch, xnticks_t timeout,
+		     xntmode_t timeout_mode)
+{
+	struct xnthread *thread;
+	spl_t s;
+
+	primary_mode_only();
+
+	XENO_BUG_ON(COBALT, synch->status & XNSYNCH_OWNER);
+
+	thread = xnthread_current();
+
+	if (IS_ENABLED(CONFIG_XENO_OPT_DEBUG_MUTEX_SLEEP) &&
+	    thread->res_count > 0 &&
+	    xnthread_test_state(thread, XNWARN))
+		xnthread_signal(thread, SIGDEBUG, SIGDEBUG_MUTEX_SLEEP);
+
+	xnlock_get_irqsave(&nklock, s);
+
+	trace_cobalt_synch_sleepon(synch);
+
+	if ((synch->status & XNSYNCH_PRIO) == 0) /* i.e. FIFO */
+		list_add_tail(&thread->plink, &synch->pendq);
+	else /* i.e. priority-sorted */
+		list_add_priff(thread, &synch->pendq, wprio, plink);
+
+	xnthread_suspend(thread, XNPEND, timeout, timeout_mode, synch);
+
+	xnlock_put_irqrestore(&nklock, s);
+
+	return xnthread_test_info(thread, XNRMID|XNTIMEO|XNBREAK);
+}
+EXPORT_SYMBOL_GPL(xnsynch_sleep_on);
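+
+/*
+ * Illustrative sketch (not part of the original sources): a wait/signal
+ * pattern over an ownerless object, using xnsynch_wakeup_one_sleeper()
+ * documented below. Error codes are picked for the sake of the example;
+ * the condition management and its locking are left out for brevity.
+ *
+ *	int wait_for_event(struct xnsynch *synch, xnticks_t timeout)
+ *	{
+ *		int info;
+ *
+ *		info = xnsynch_sleep_on(synch, timeout, XN_RELATIVE);
+ *		if (info & XNRMID)
+ *			return -EIDRM;		// object deleted
+ *		if (info & XNTIMEO)
+ *			return -ETIMEDOUT;	// watchdog timer fired
+ *		if (info & XNBREAK)
+ *			return -EINTR;		// forcibly unblocked
+ *
+ *		return 0;
+ *	}
+ *
+ *	void post_event(struct xnsynch *synch)
+ *	{
+ *		if (xnsynch_wakeup_one_sleeper(synch))
+ *			xnsched_run();
+ *	}
+ */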
+
+/**
+ * @fn struct xnthread *xnsynch_wakeup_one_sleeper(struct xnsynch *synch);
+ * @brief Unblock the heading thread from wait.
+ *
+ * This service wakes up the thread which is currently leading the
+ * synchronization object's pending list. The sleeping thread is
+ * unblocked from its pending state, but no reschedule is performed.
+ *
+ * This service should be called by upper interfaces wanting to signal
+ * the given resource so that a single waiter is resumed. It must not
+ * be used with synchronization objects that are supposed to track
+ * ownership (i.e. objects created with XNSYNCH_OWNER set).
+ *
+ * @param synch The descriptor address of the synchronization object
+ * to signal.
+ *
+ * @return The descriptor address of the unblocked thread, or NULL if
+ * no thread was waiting.
+ *
+ * @coretags{unrestricted}
+ */
+struct xnthread *xnsynch_wakeup_one_sleeper(struct xnsynch *synch)
+{
+	struct xnthread *thread;
+	spl_t s;
+
+	XENO_BUG_ON(COBALT, synch->status & XNSYNCH_OWNER);
+
+	xnlock_get_irqsave(&nklock, s);
+
+	if (list_empty(&synch->pendq)) {
+		thread = NULL;
+		goto out;
+	}
+
+	trace_cobalt_synch_wakeup(synch);
+	thread = list_first_entry(&synch->pendq, struct xnthread, plink);
+	list_del(&thread->plink);
+	thread->wchan = NULL;
+	xnthread_resume(thread, XNPEND);
+out:
+	xnlock_put_irqrestore(&nklock, s);
+
+	return thread;
+}
+EXPORT_SYMBOL_GPL(xnsynch_wakeup_one_sleeper);
+
+int xnsynch_wakeup_many_sleepers(struct xnsynch *synch, int nr)
+{
+	struct xnthread *thread, *tmp;
+	int nwakeups = 0;
+	spl_t s;
+
+	XENO_BUG_ON(COBALT, synch->status & XNSYNCH_OWNER);
+
+	xnlock_get_irqsave(&nklock, s);
+
+	if (list_empty(&synch->pendq))
+		goto out;
+
+	trace_cobalt_synch_wakeup_many(synch);
+
+	list_for_each_entry_safe(thread, tmp, &synch->pendq, plink) {
+		if (nwakeups++ >= nr)
+			break;
+		list_del(&thread->plink);
+		thread->wchan = NULL;
+		xnthread_resume(thread, XNPEND);
+	}
+out:
+	xnlock_put_irqrestore(&nklock, s);
+
+	return nwakeups;
+}
+EXPORT_SYMBOL_GPL(xnsynch_wakeup_many_sleepers);
+
+/**
+ * @fn void xnsynch_wakeup_this_sleeper(struct xnsynch *synch, struct xnthread *sleeper);
+ * @brief Unblock a particular thread from wait.
+ *
+ * This service wakes up a specific thread which is currently pending on
+ * the given synchronization object. The sleeping thread is unblocked
+ * from its pending state, but no reschedule is performed.
+ *
+ * This service should be called by upper interfaces wanting to signal
+ * the given resource so that a specific waiter is resumed. It must not
+ * be used with synchronization objects that are supposed to track
+ * ownership (i.e. objects created with XNSYNCH_OWNER set).
+ *
+ * @param synch The descriptor address of the synchronization object
+ * to signal.
+ *
+ * @param sleeper The thread to unblock which MUST be currently linked
+ * to the synchronization object's pending queue (i.e. synch->pendq).
+ *
+ * @coretags{unrestricted}
+ */
+void xnsynch_wakeup_this_sleeper(struct xnsynch *synch, struct xnthread *sleeper)
+{
+	spl_t s;
+
+	XENO_BUG_ON(COBALT, synch->status & XNSYNCH_OWNER);
+
+	xnlock_get_irqsave(&nklock, s);
+
+	trace_cobalt_synch_wakeup(synch);
+	list_del(&sleeper->plink);
+	sleeper->wchan = NULL;
+	xnthread_resume(sleeper, XNPEND);
+
+	xnlock_put_irqrestore(&nklock, s);
+}
+EXPORT_SYMBOL_GPL(xnsynch_wakeup_this_sleeper);
+
+static inline void raise_boost_flag(struct xnthread *owner)
+{
+	/* Backup the base priority at first boost only. */
+	if (!xnthread_test_state(owner, XNBOOST)) {
+		owner->bprio = owner->cprio;
+		xnthread_set_state(owner, XNBOOST);
+	}
+}
+
+static void inherit_thread_priority(struct xnthread *owner,
+				    struct xnthread *target)
+{
+	if (xnthread_test_state(owner, XNZOMBIE))
+		return;
+
+	/* Apply the scheduling policy of "target" to "owner". */
+	xnsched_track_policy(owner, target);
+
+	/*
+	 * Owner may be sleeping, propagate priority update through
+	 * the PI chain if needed.
+	 */
+	if (owner->wchan)
+		xnsynch_requeue_sleeper(owner);
+}
+
+static void __ceil_owner_priority(struct xnthread *owner, int prio)
+{
+	if (xnthread_test_state(owner, XNZOMBIE))
+		return;
+	/*
+	 * Raise owner priority to the ceiling value, this implicitly
+	 * selects SCHED_FIFO for the owner.
+	 */
+	xnsched_protect_priority(owner, prio);
+
+	if (owner->wchan)
+		xnsynch_requeue_sleeper(owner);
+}
+
+static void adjust_boost(struct xnthread *owner, struct xnthread *target)
+{
+	struct xnsynch *synch;
+
+	/*
+	 * CAUTION: we may have both PI and PP-enabled objects among
+	 * the boosters, so considering only the leader of synch->pendq
+	 * is NOT enough for determining the next boost priority, since
+	 * PP is tracked on acquisition, not on contention. Check the
+	 * head of the booster list instead.
+	 */
+	synch = list_first_entry(&owner->boosters, struct xnsynch, next);
+	if (synch->wprio == owner->wprio)
+		return;
+
+	if (synch->status & XNSYNCH_PP)
+		__ceil_owner_priority(owner, get_ceiling_value(synch));
+	else {
+		XENO_BUG_ON(COBALT, list_empty(&synch->pendq));
+		if (target == NULL)
+			target = list_first_entry(&synch->pendq,
+						  struct xnthread, plink);
+		inherit_thread_priority(owner, target);
+	}
+}
+
+static void ceil_owner_priority(struct xnsynch *synch)
+{
+	struct xnthread *owner = synch->owner;
+	int wprio;
+
+	/* PP ceiling values are implicitly based on the RT class. */
+	wprio = xnsched_calc_wprio(&xnsched_class_rt,
+				   get_ceiling_value(synch));
+	synch->wprio = wprio;
+	list_add_priff(synch, &owner->boosters, wprio, next);
+	raise_boost_flag(owner);
+	synch->status |= XNSYNCH_CEILING;
+
+	/*
+	 * If the ceiling value is lower than the current effective
+	 * priority, we must not adjust the latter.  BEWARE: not only
+	 * is this restriction required to keep the PP logic right,
+	 * but it is also a basic assumption made by all
+	 * xnthread_commit_ceiling() callers, which won't check for
+	 * any rescheduling opportunity upon return.
+	 *
+	 * However we do want the object to be linked to the booster
+	 * list, and XNBOOST must appear in the current thread status.
+	 *
+	 * This way, setparam() won't be allowed to decrease the
+	 * current weighted priority below the ceiling value, until we
+	 * eventually release this object.
+	 */
+	if (wprio > owner->wprio)
+		adjust_boost(owner, NULL);
+}
+
+static inline
+void track_owner(struct xnsynch *synch, struct xnthread *owner)
+{
+	synch->owner = owner;
+}
+
+static inline  /* nklock held, irqs off */
+void set_current_owner_locked(struct xnsynch *synch, struct xnthread *owner)
+{
+	/*
+	 * Update the owner information, and apply priority protection
+	 * for PP objects. We may only get there if owner is current,
+	 * or blocked.
+	 */
+	track_owner(synch, owner);
+	if (synch->status & XNSYNCH_PP)
+		ceil_owner_priority(synch);
+}
+
+static inline
+void set_current_owner(struct xnsynch *synch, struct xnthread *owner)
+{
+	spl_t s;
+
+	track_owner(synch, owner);
+	if (synch->status & XNSYNCH_PP) {
+		xnlock_get_irqsave(&nklock, s);
+		ceil_owner_priority(synch);
+		xnlock_put_irqrestore(&nklock, s);
+	}
+}
+
+static inline
+xnhandle_t get_owner_handle(xnhandle_t ownerh, struct xnsynch *synch)
+{
+	/*
+	 * On acquisition from kernel space, the fast lock handle
+	 * should bear the FLCEIL bit for PP objects, so that userland
+	 * takes the slow path on release, jumping to the kernel for
+	 * dropping the ceiling priority boost.
+	 */
+	if (synch->status & XNSYNCH_PP)
+		ownerh = xnsynch_fast_ceiling(ownerh);
+
+	return ownerh;
+}
+
+static void commit_ceiling(struct xnsynch *synch, struct xnthread *curr)
+{
+	xnhandle_t oldh, h;
+	atomic_t *lockp;
+
+	track_owner(synch, curr);
+	ceil_owner_priority(synch);
+	/*
+	 * Raise FLCEIL, which indicates a kernel entry will be
+	 * required for releasing this resource.
+	 */
+	lockp = xnsynch_fastlock(synch);
+	do {
+		h = atomic_read(lockp);
+		oldh = atomic_cmpxchg(lockp, h, xnsynch_fast_ceiling(h));
+	} while (oldh != h);
+}
+
+void xnsynch_commit_ceiling(struct xnthread *curr)  /* nklock held, irqs off */
+{
+	struct xnsynch *synch;
+	atomic_t *lockp;
+
+	/* curr->u_window has to be valid, curr bears XNUSER. */
+	synch = lookup_lazy_pp(curr->u_window->pp_pending);
+	if (synch == NULL) {
+		/*
+		 * If pp_pending is a bad handle, don't panic but
+		 * rather ignore: we don't want a misbehaving userland
+		 * to crash the kernel.
+		 */
+		XENO_WARN_ON_ONCE(USER, 1);
+		goto out;
+	}
+
+	/*
+	 * For PP locks, userland does, in that order:
+	 *
+	 * -- LOCK
+	 * 1. curr->u_window->pp_pending = lock_handle
+	 *    barrier();
+	 * 2. atomic_cmpxchg(lockp, XN_NO_HANDLE, curr->handle);
+	 *
+	 * -- UNLOCK
+	 * 1. atomic_cmpxchg(lockp, curr->handle, XN_NO_HANDLE); [unclaimed]
+	 *    barrier();
+	 * 2. curr->u_window->pp_pending = XN_NO_HANDLE
+	 *
+	 * Make sure we have not been caught in a rescheduling in
+	 * between those steps. If we did, then we won't be holding
+	 * the lock as we schedule away, therefore no priority update
+	 * must take place.
+	 */
+	lockp = xnsynch_fastlock(synch);
+	if (xnsynch_fast_owner_check(lockp, curr->handle))
+		return;
+
+	/*
+	 * In rare cases, we could be called multiple times for
+	 * committing a lazy ceiling for the same object, e.g. if
+	 * userland is preempted in the middle of a recursive locking
+	 * sequence.
+	 *
+	 * This stems from the fact that userland has to update
+	 * ->pp_pending prior to trying to grab the lock atomically,
+	 * at which point it can figure out whether a recursive
+	 * locking happened. We get out of this trap by testing the
+	 * XNSYNCH_CEILING flag.
+	 */
+	if ((synch->status & XNSYNCH_CEILING) == 0)
+		commit_ceiling(synch, curr);
+out:
+	curr->u_window->pp_pending = XN_NO_HANDLE;
+}
+
+/**
+ * @fn int xnsynch_try_acquire(struct xnsynch *synch);
+ * @brief Try acquiring the ownership of a synchronization object.
+ *
+ * This service should be called by upper interfaces wanting the
+ * current thread to acquire the ownership of the given resource. If
+ * the resource is already assigned to another thread, the call
+ * returns with an error code.
+ *
+ * This service must be used only with synchronization objects that
+ * track ownership (XNSYNCH_OWNER set).
+ *
+ * @param synch The descriptor address of the synchronization object
+ * to acquire.
+ *
+ * @return Zero is returned if @a synch has been successfully
+ * acquired. Otherwise:
+ *
+ * - -EDEADLK is returned if @a synch is currently held by the calling
+ * thread.
+ *
+ * - -EBUSY is returned if @a synch is currently held by another
+ * thread.
+ *
+ * @coretags{primary-only}
+ */
+int xnsynch_try_acquire(struct xnsynch *synch)
+{
+	struct xnthread *curr;
+	atomic_t *lockp;
+	xnhandle_t h;
+
+	primary_mode_only();
+
+	XENO_BUG_ON(COBALT, (synch->status & XNSYNCH_OWNER) == 0);
+
+	curr = xnthread_current();
+	lockp = xnsynch_fastlock(synch);
+	trace_cobalt_synch_try_acquire(synch);
+
+	h = atomic_cmpxchg(lockp, XN_NO_HANDLE,
+			   get_owner_handle(curr->handle, synch));
+	if (h != XN_NO_HANDLE)
+		return xnhandle_get_id(h) == curr->handle ?
+			-EDEADLK : -EBUSY;
+
+	set_current_owner(synch, curr);
+	xnthread_get_resource(curr);
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(xnsynch_try_acquire);
+
+/**
+ * @fn int xnsynch_acquire(struct xnsynch *synch, xnticks_t timeout, xntmode_t timeout_mode);
+ * @brief Acquire the ownership of a synchronization object.
+ *
+ * This service should be called by upper interfaces wanting the
+ * current thread to acquire the ownership of the given resource. If
+ * the resource is already assigned to another thread, the caller is
+ * suspended.
+ *
+ * This service must be used only with synchronization objects that
+ * track ownership (XNSYNCH_OWNER set).
+ *
+ * @param synch The descriptor address of the synchronization object
+ * to acquire.
+ *
+ * @param timeout The timeout which may be used to limit the time the
+ * thread pends on the resource. This value is a wait time given as a
+ * count of nanoseconds. It can either be relative, absolute
+ * monotonic, or absolute adjustable depending on @a
+ * timeout_mode. Passing XN_INFINITE @b and setting @a timeout_mode to
+ * XN_RELATIVE specifies an unbounded wait. All other values are used
+ * to initialize a watchdog timer.
+ *
+ * @param timeout_mode The mode of the @a timeout parameter. It can
+ * either be set to XN_RELATIVE, XN_ABSOLUTE, or XN_REALTIME (see also
+ * xntimer_start()).
+ *
+ * @return A bitmask which may include zero or one information bit
+ * among XNRMID, XNTIMEO and XNBREAK, which should be tested by the
+ * caller, for detecting respectively: object deletion, timeout or
+ * signal/unblock conditions which might have happened while waiting.
+ *
+ * @coretags{primary-only, might-switch}
+ *
+ * @note Unlike xnsynch_try_acquire(), this call does NOT check for
+ * an invalid recursive locking request, which means that such a request
+ * will always cause a deadlock for the caller.
+ */
+int xnsynch_acquire(struct xnsynch *synch, xnticks_t timeout,
+		    xntmode_t timeout_mode)
+{
+	struct xnthread *curr, *owner;
+	xnhandle_t currh, h, oldh;
+	atomic_t *lockp;
+	spl_t s;
+
+	primary_mode_only();
+
+	XENO_BUG_ON(COBALT, (synch->status & XNSYNCH_OWNER) == 0);
+
+	curr = xnthread_current();
+	currh = curr->handle;
+	lockp = xnsynch_fastlock(synch);
+	trace_cobalt_synch_acquire(synch);
+redo:
+	/* Basic form of xnsynch_try_acquire(). */
+	h = atomic_cmpxchg(lockp, XN_NO_HANDLE,
+			   get_owner_handle(currh, synch));
+	if (likely(h == XN_NO_HANDLE)) {
+		set_current_owner(synch, curr);
+		xnthread_get_resource(curr);
+		return 0;
+	}
+
+	xnlock_get_irqsave(&nklock, s);
+
+	/*
+	 * Set claimed bit.  In case it appears to be set already,
+	 * re-read its state under nklock so that we don't miss any
+	 * change between the lock-less read and here. But also try to
+	 * avoid cmpxchg where possible. Only if it appears not to be
+	 * set, start with cmpxchg directly.
+	 */
+	if (xnsynch_fast_is_claimed(h)) {
+		oldh = atomic_read(lockp);
+		goto test_no_owner;
+	}
+
+	do {
+		oldh = atomic_cmpxchg(lockp, h, xnsynch_fast_claimed(h));
+		if (likely(oldh == h))
+			break;
+	test_no_owner:
+		if (oldh == XN_NO_HANDLE) {
+			/* Mutex released from another cpu. */
+			xnlock_put_irqrestore(&nklock, s);
+			goto redo;
+		}
+		h = oldh;
+	} while (!xnsynch_fast_is_claimed(h));
+
+	owner = xnthread_lookup(h);
+	if (owner == NULL) {
+		/*
+		 * The handle is broken, therefore pretend that the
+		 * synch object was deleted to signal an error.
+		 */
+		xnthread_set_info(curr, XNRMID);
+		goto out;
+	}
+
+	/*
+	 * This is the contended path. We just detected an earlier
+	 * syscall-less fast locking from userland, fix up the
+	 * in-kernel state information accordingly.
+	 *
+	 * The consistency of the state information is guaranteed,
+	 * because we just raised the claim bit atomically for this
+	 * contended lock, therefore userland will have to jump to the
+	 * kernel when releasing it, instead of doing a fast
+	 * unlock. Since we currently own the superlock, consistency
+	 * wrt transfer_ownership() is guaranteed through
+	 * serialization.
+	 *
+	 * CAUTION: in this particular case, the only assumptions we
+	 * can safely make is that *owner is valid but not current on
+	 * this CPU.
+	 */
+	track_owner(synch, owner);
+	xnsynch_detect_relaxed_owner(synch, curr);
+
+	if ((synch->status & XNSYNCH_PRIO) == 0) { /* i.e. FIFO */
+		list_add_tail(&curr->plink, &synch->pendq);
+		goto block;
+	}
+
+	if (curr->wprio > owner->wprio) {
+		if (xnthread_test_info(owner, XNWAKEN) && owner->wwake == synch) {
+			/* Ownership is still pending, steal the resource. */
+			set_current_owner_locked(synch, curr);
+			xnthread_clear_info(curr, XNRMID | XNTIMEO | XNBREAK);
+			xnthread_set_info(owner, XNROBBED);
+			goto grab;
+		}
+
+		list_add_priff(curr, &synch->pendq, wprio, plink);
+
+		if (synch->status & XNSYNCH_PI) {
+			raise_boost_flag(owner);
+
+			if (synch->status & XNSYNCH_CLAIMED)
+				list_del(&synch->next); /* owner->boosters */
+			else
+				synch->status |= XNSYNCH_CLAIMED;
+
+			synch->wprio = curr->wprio;
+			list_add_priff(synch, &owner->boosters, wprio, next);
+			/*
+			 * curr->wprio > owner->wprio implies that
+			 * synch must be leading the booster list
+			 * after insertion, so we may call
+			 * inherit_thread_priority() for tracking
+			 * current's priority directly without going
+			 * through adjust_boost().
+			 */
+			inherit_thread_priority(owner, curr);
+		}
+	} else
+		list_add_priff(curr, &synch->pendq, wprio, plink);
+block:
+	xnthread_suspend(curr, XNPEND, timeout, timeout_mode, synch);
+	curr->wwake = NULL;
+	xnthread_clear_info(curr, XNWAKEN);
+
+	if (xnthread_test_info(curr, XNRMID | XNTIMEO | XNBREAK))
+		goto out;
+
+	if (xnthread_test_info(curr, XNROBBED)) {
+		/*
+		 * Somebody stole the ownership from us while we were
+		 * ready to run, waiting for the CPU: we need to wait
+		 * for the resource again.
+		 */
+		if (timeout_mode != XN_RELATIVE || timeout == XN_INFINITE) {
+			xnlock_put_irqrestore(&nklock, s);
+			goto redo;
+		}
+		timeout = xntimer_get_timeout_stopped(&curr->rtimer);
+		if (timeout > 1) { /* Otherwise, it's too late. */
+			xnlock_put_irqrestore(&nklock, s);
+			goto redo;
+		}
+		xnthread_set_info(curr, XNTIMEO);
+		goto out;
+	}
+grab:
+	xnthread_get_resource(curr);
+
+	if (xnsynch_pended_p(synch))
+		currh = xnsynch_fast_claimed(currh);
+
+	/* Set new ownership for this object. */
+	atomic_set(lockp, get_owner_handle(currh, synch));
+out:
+	xnlock_put_irqrestore(&nklock, s);
+
+	return xnthread_test_info(curr, XNRMID|XNTIMEO|XNBREAK);
+}
+EXPORT_SYMBOL_GPL(xnsynch_acquire);
+
+static void drop_booster(struct xnsynch *synch, struct xnthread *owner)
+{
+	list_del(&synch->next);	/* owner->boosters */
+
+	if (list_empty(&owner->boosters)) {
+		xnthread_clear_state(owner, XNBOOST);
+		inherit_thread_priority(owner, owner);
+	} else
+		adjust_boost(owner, NULL);
+}
+
+static inline void clear_pi_boost(struct xnsynch *synch,
+				  struct xnthread *owner)
+{	/* nklock held, irqs off */
+	synch->status &= ~XNSYNCH_CLAIMED;
+	drop_booster(synch, owner);
+}
+
+static inline void clear_pp_boost(struct xnsynch *synch,
+				  struct xnthread *owner)
+{	/* nklock held, irqs off */
+	synch->status &= ~XNSYNCH_CEILING;
+	drop_booster(synch, owner);
+}
+
+static bool transfer_ownership(struct xnsynch *synch,
+			       struct xnthread *lastowner)
+{				/* nklock held, irqs off */
+	struct xnthread *nextowner;
+	xnhandle_t nextownerh;
+	atomic_t *lockp;
+
+	lockp = xnsynch_fastlock(synch);
+
+	/*
+	 * Our caller checked for contention locklessly, so we do have
+	 * to check again under lock in a different way.
+	 */
+	if (list_empty(&synch->pendq)) {
+		synch->owner = NULL;
+		atomic_set(lockp, XN_NO_HANDLE);
+		return false;
+	}
+
+	nextowner = list_first_entry(&synch->pendq, struct xnthread, plink);
+	list_del(&nextowner->plink);
+	nextowner->wchan = NULL;
+	nextowner->wwake = synch;
+	set_current_owner_locked(synch, nextowner);
+	xnthread_set_info(nextowner, XNWAKEN);
+	xnthread_resume(nextowner, XNPEND);
+
+	if (synch->status & XNSYNCH_CLAIMED)
+		clear_pi_boost(synch, lastowner);
+
+	nextownerh = get_owner_handle(nextowner->handle, synch);
+	if (xnsynch_pended_p(synch))
+		nextownerh = xnsynch_fast_claimed(nextownerh);
+
+	atomic_set(lockp, nextownerh);
+
+	return true;
+}
+
+/**
+ * @fn bool xnsynch_release(struct xnsynch *synch, struct xnthread *curr)
+ * @brief Release a resource and pass it to the next waiting thread.
+ *
+ * This service releases the ownership of the given synchronization
+ * object. The thread which is currently leading the object's pending
+ * list, if any, is unblocked from its pending state. However, no
+ * reschedule is performed.
+ *
+ * This service must be used only with synchronization objects that
+ * track ownership (XNSYNCH_OWNER set).
+ *
+ * @param synch The descriptor address of the synchronization object
+ * whose ownership is changed.
+ *
+ * @param curr The descriptor address of the current thread, which
+ * must own the object at the time of calling.
+ *
+ * @return True if a reschedule is required.
+ *
+ * @sideeffect
+ *
+ * - The effective priority of the previous resource owner might be
+ * lowered to its base priority value as a consequence of the priority
+ * boost being cleared.
+ *
+ * - The synchronization object ownership is transferred to the
+ * unblocked thread.
+ *
+ * @coretags{primary-only, might-switch}
+ */
+bool xnsynch_release(struct xnsynch *synch, struct xnthread *curr)
+{
+	bool need_resched = false;
+	xnhandle_t currh, h;
+	atomic_t *lockp;
+	spl_t s;
+
+	XENO_BUG_ON(COBALT, (synch->status & XNSYNCH_OWNER) == 0);
+
+	trace_cobalt_synch_release(synch);
+
+	if (xnthread_put_resource(curr))
+		return false;
+
+	lockp = xnsynch_fastlock(synch);
+	currh = curr->handle;
+	/*
+	 * FLCEIL may only be raised by the owner, or when the owner
+	 * is blocked waiting for the synch (ownership transfer). In
+	 * addition, only the current owner of a synch may release it,
+	 * therefore we can't race while testing FLCEIL locklessly.
+	 * All updates to FLCLAIM are covered by the superlock.
+	 *
+	 * Therefore, clearing the fastlock racelessly in this routine
+	 * without leaking FLCEIL/FLCLAIM updates can be achieved by
+	 * holding the superlock.
+	 */
+	xnlock_get_irqsave(&nklock, s);
+
+	if (synch->status & XNSYNCH_CEILING) {
+		clear_pp_boost(synch, curr);
+		need_resched = true;
+	}
+
+	h = atomic_cmpxchg(lockp, currh, XN_NO_HANDLE);
+	if ((h & ~XNSYNCH_FLCEIL) != currh)
+		/* FLCLAIM set, synch is contended. */
+		need_resched = transfer_ownership(synch, curr);
+	else if (h != currh)	/* FLCEIL set, FLCLAIM clear. */
+		atomic_set(lockp, XN_NO_HANDLE);
+
+	xnlock_put_irqrestore(&nklock, s);
+
+	return need_resched;
+}
+EXPORT_SYMBOL_GPL(xnsynch_release);
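+
+/*
+ * Illustrative sketch (not part of the original sources): guarding a
+ * critical section with an ownership-tracking object created with
+ * XNSYNCH_PI. xnsynch_acquire() may only be called from primary mode;
+ * xnsynch_release() tells the caller whether a rescheduling call is
+ * due after the lock was handed over. Error codes are picked for the
+ * sake of the example.
+ *
+ *	int lock_resource(struct xnsynch *mutex)
+ *	{
+ *		int info;
+ *
+ *		info = xnsynch_acquire(mutex, XN_INFINITE, XN_RELATIVE);
+ *		if (info & XNRMID)
+ *			return -EIDRM;
+ *		if (info & XNBREAK)
+ *			return -EINTR;
+ *
+ *		return 0;
+ *	}
+ *
+ *	void unlock_resource(struct xnsynch *mutex)
+ *	{
+ *		if (xnsynch_release(mutex, xnthread_current()))
+ *			xnsched_run();
+ *	}
+ */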
+
+void xnsynch_requeue_sleeper(struct xnthread *thread)
+{				/* nklock held, irqs off */
+	struct xnsynch *synch = thread->wchan;
+	struct xnthread *owner;
+
+	XENO_BUG_ON(COBALT, !(synch->status & XNSYNCH_PRIO));
+
+	/*
+	 * Update the position in the pend queue of a thread waiting
+	 * for a lock. This routine propagates the change throughout
+	 * the PI chain if required.
+	 */
+	list_del(&thread->plink);
+	list_add_priff(thread, &synch->pendq, wprio, plink);
+	owner = synch->owner;
+
+	/* Only PI-enabled objects are of interest here. */
+	if ((synch->status & XNSYNCH_PI) == 0)
+		return;
+
+	synch->wprio = thread->wprio;
+	if (synch->status & XNSYNCH_CLAIMED)
+		list_del(&synch->next);
+	else {
+		synch->status |= XNSYNCH_CLAIMED;
+		raise_boost_flag(owner);
+	}
+
+	list_add_priff(synch, &owner->boosters, wprio, next);
+	adjust_boost(owner, thread);
+}
+EXPORT_SYMBOL_GPL(xnsynch_requeue_sleeper);
+
+/**
+ * @fn struct xnthread *xnsynch_peek_pendq(struct xnsynch *synch);
+ * @brief Access the thread leading a synch object wait queue.
+ *
+ * This service returns the descriptor address of the thread leading a
+ * synchronization object's wait queue.
+ *
+ * @param synch The descriptor address of the target synchronization object.
+ *
+ * @return The descriptor address of the leading thread, or NULL if
+ * the wait queue is empty.
+ *
+ * @coretags{unrestricted}
+ */
+struct xnthread *xnsynch_peek_pendq(struct xnsynch *synch)
+{
+	struct xnthread *thread = NULL;
+	spl_t s;
+
+	xnlock_get_irqsave(&nklock, s);
+
+	if (!list_empty(&synch->pendq))
+		thread = list_first_entry(&synch->pendq,
+					  struct xnthread, plink);
+
+	xnlock_put_irqrestore(&nklock, s);
+
+	return thread;
+}
+EXPORT_SYMBOL_GPL(xnsynch_peek_pendq);
+
+/**
+ * @fn int xnsynch_flush(struct xnsynch *synch, int reason);
+ * @brief Unblock all waiters pending on a resource.
+ *
+ * This service atomically releases all threads which currently sleep
+ * on a given resource. This service should be called by upper
+ * interfaces under circumstances requiring that the pending queue of
+ * a given resource is cleared, such as before the resource is
+ * deleted.
+ *
+ * @param synch The descriptor address of the synchronization object
+ * to be flushed.
+ *
+ * @param reason Some flags to set in the information mask of every
+ * unblocked thread. Zero is an acceptable value. The following bits
+ * are pre-defined by Cobalt:
+ *
+ * - XNRMID should be set to indicate that the synchronization object
+ * is about to be destroyed (see xnthread_resume()).
+ *
+ * - XNBREAK should be set to indicate that the wait has been forcibly
+ * interrupted (see xnthread_unblock()).
+ *
+ * @return XNSYNCH_RESCHED is returned if at least one thread is
+ * unblocked, which means the caller should invoke xnsched_run() for
+ * applying the new scheduling state. Otherwise, XNSYNCH_DONE is
+ * returned.
+ *
+ * @sideeffect
+ *
+ * - The effective priority of the current resource owner might be
+ * lowered to its base priority value as a consequence of the priority
+ * inheritance boost being cleared.
+ *
+ * @coretags{unrestricted}
+ */
+int xnsynch_flush(struct xnsynch *synch, int reason)
+{
+	struct xnthread *sleeper, *tmp;
+	int ret;
+	spl_t s;
+
+	xnlock_get_irqsave(&nklock, s);
+
+	trace_cobalt_synch_flush(synch);
+
+	if (list_empty(&synch->pendq)) {
+		XENO_BUG_ON(COBALT, synch->status & XNSYNCH_CLAIMED);
+		ret = XNSYNCH_DONE;
+	} else {
+		ret = XNSYNCH_RESCHED;
+		list_for_each_entry_safe(sleeper, tmp, &synch->pendq, plink) {
+			list_del(&sleeper->plink);
+			xnthread_set_info(sleeper, reason);
+			sleeper->wchan = NULL;
+			xnthread_resume(sleeper, XNPEND);
+		}
+		if (synch->status & XNSYNCH_CLAIMED)
+			clear_pi_boost(synch, synch->owner);
+	}
+
+	xnlock_put_irqrestore(&nklock, s);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(xnsynch_flush);
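+
+/*
+ * Illustrative sketch (not part of the original sources): forcibly
+ * interrupting all waiters before tearing an object down, following
+ * the XNBREAK convention documented above.
+ *
+ *	void abort_waiters(struct xnsynch *synch)
+ *	{
+ *		if (xnsynch_flush(synch, XNBREAK) == XNSYNCH_RESCHED)
+ *			xnsched_run();
+ *	}
+ */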
+
+void xnsynch_forget_sleeper(struct xnthread *thread)
+{				/* nklock held, irqs off */
+	struct xnsynch *synch = thread->wchan;
+	struct xnthread *owner, *target;
+
+	/*
+	 * Do all the necessary housekeeping chores to stop a thread
+	 * from waiting on a given synchronization object. Doing so
+	 * may require updating a PI chain.
+	 */
+	trace_cobalt_synch_forget(synch);
+
+	xnthread_clear_state(thread, XNPEND);
+	thread->wchan = NULL;
+	list_del(&thread->plink); /* synch->pendq */
+
+	/*
+	 * Only a sleeper leaving a PI chain triggers an update.
+	 * NOTE: PP objects never bear the CLAIMED bit.
+	 */
+	if ((synch->status & XNSYNCH_CLAIMED) == 0)
+		return;
+
+	owner = synch->owner;
+
+	if (list_empty(&synch->pendq)) {
+		/* No more sleepers: clear the PI boost. */
+		clear_pi_boost(synch, owner);
+		return;
+	}
+
+	/*
+	 * Reorder the booster queue of the current owner after we
+	 * left the wait list, then set its priority to the new
+	 * minimum required to prevent priority inversion.
+	 */
+	target = list_first_entry(&synch->pendq, struct xnthread, plink);
+	synch->wprio = target->wprio;
+	list_del(&synch->next);	/* owner->boosters */
+	list_add_priff(synch, &owner->boosters, wprio, next);
+	adjust_boost(owner, target);
+}
+EXPORT_SYMBOL_GPL(xnsynch_forget_sleeper);
+
+#ifdef CONFIG_XENO_OPT_DEBUG_MUTEX_RELAXED
+
+/*
+ * Detect when a thread is about to sleep on a synchronization
+ * object currently owned by someone running in secondary mode.
+ */
+void xnsynch_detect_relaxed_owner(struct xnsynch *synch,
+				  struct xnthread *sleeper)
+{
+	if (xnthread_test_state(sleeper, XNWARN) &&
+	    !xnthread_test_info(sleeper, XNPIALERT) &&
+	    xnthread_test_state(synch->owner, XNRELAX)) {
+		xnthread_set_info(sleeper, XNPIALERT);
+		__xnthread_signal(sleeper, SIGDEBUG,
+				  SIGDEBUG_MIGRATE_PRIOINV);
+	} else
+		xnthread_clear_info(sleeper,  XNPIALERT);
+}
+
+/*
+ * Detect when a thread is about to relax while holding booster(s)
+ * (claimed PI or active PP object), which denotes a potential for
+ * priority inversion. In such an event, any sleeper bearing the
+ * XNWARN bit will receive a SIGDEBUG notification.
+ */
+void xnsynch_detect_boosted_relax(struct xnthread *owner)
+{
+	struct xnthread *sleeper;
+	struct xnsynch *synch;
+	spl_t s;
+
+	xnlock_get_irqsave(&nklock, s);
+
+	xnthread_for_each_booster(synch, owner) {
+		xnsynch_for_each_sleeper(sleeper, synch) {
+			if (xnthread_test_state(sleeper, XNWARN)) {
+				xnthread_set_info(sleeper, XNPIALERT);
+				__xnthread_signal(sleeper, SIGDEBUG,
+						  SIGDEBUG_MIGRATE_PRIOINV);
+			}
+		}
+	}
+
+	xnlock_put_irqrestore(&nklock, s);
+}
+
+#endif /* CONFIG_XENO_OPT_DEBUG_MUTEX_RELAXED */
+
+/** @} */
+++ linux-patched/kernel/xenomai/select.c	2022-03-21 12:58:28.857894149 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/kernel/xenomai/debug.h	1970-01-01 01:00:00.000000000 +0100
+/*
+ * Gilles Chanteperdrix <gilles.chanteperdrix@xenomai.org>
+ * Copyright (C) 2008 Efixo
+ *
+ * Xenomai is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published
+ * by the Free Software Foundation; either version 2 of the License,
+ * or (at your option) any later version.
+ *
+ * Xenomai is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Xenomai; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA
+ * 02111-1307, USA.
+ */
+#include <linux/types.h>
+#include <linux/bitops.h>	/* For hweight_long */
+#include <cobalt/kernel/heap.h>
+#include <cobalt/kernel/sched.h>
+#include <cobalt/kernel/synch.h>
+#include <cobalt/kernel/select.h>
+#include <pipeline/sirq.h>
+
+/**
+ * @ingroup cobalt_core
+ * @defgroup cobalt_core_select Synchronous I/O multiplexing
+ *
+ * This module implements the services needed for implementing the
+ * POSIX select() service, or any other event multiplexing services.
+ *
+ * Following the implementation of the POSIX select service, this module defines
+ * three types of events:
+ * - \a XNSELECT_READ meaning that a file descriptor is ready for reading;
+ * - \a XNSELECT_WRITE meaning that a file descriptor is ready for writing;
+ * - \a XNSELECT_EXCEPT meaning that a file descriptor received an exceptional
+ *   event.
+ *
+ * It works by defining two structures:
+ * - a @a struct @a xnselect structure, which should be added to every file
+ * descriptor for every event type (read, write, or except);
+ * - a @a struct @a xnselector structure, the selection structure, passed by
+ * the thread calling the xnselect service, where this service does all its
+ * housekeeping.
+ * @{
+ */
+
+static LIST_HEAD(selector_list);
+static int deletion_virq;
+
+/**
+ * Initialize a @a struct @a xnselect structure.
+ *
+ * This service must be called to initialize a @a struct @a xnselect structure
+ * before it is bound to a selector by the means of xnselect_bind().
+ *
+ * @param select_block pointer to the xnselect structure to be initialized
+ *
+ * @coretags{task-unrestricted}
+ */
+void xnselect_init(struct xnselect *select_block)
+{
+	INIT_LIST_HEAD(&select_block->bindings);
+}
+EXPORT_SYMBOL_GPL(xnselect_init);
+
+static inline int xnselect_wakeup(struct xnselector *selector)
+{
+	return xnsynch_flush(&selector->synchbase, 0) == XNSYNCH_RESCHED;
+}
+
+/**
+ * Bind a file descriptor (represented by its @a xnselect structure) to a
+ * selector block.
+ *
+ * @param select_block pointer to the @a struct @a xnselect to be bound;
+ *
+ * @param binding pointer to a newly allocated (using xnmalloc) @a struct
+ * @a xnselect_binding;
+ *
+ * @param selector pointer to the selector structure;
+ *
+ * @param type type of events (@a XNSELECT_READ, @a XNSELECT_WRITE, or @a
+ * XNSELECT_EXCEPT);
+ *
+ * @param index index of the file descriptor (represented by @a
+ * select_block) in the bit fields used by the @a selector structure;
+ *
+ * @param state current state of the file descriptor.
+ *
+ * @a select_block must have been initialized with xnselect_init(),
+ * the @a xnselector structure must have been initialized with
+ * xnselector_init(), @a binding may be uninitialized.
+ *
+ * This service must be called with nklock locked, irqs off. For this reason,
+ * the @a binding parameter must have been allocated by the caller outside the
+ * locking section.
+ *
+ * @retval -EINVAL if @a type or @a index is invalid;
+ * @retval 0 otherwise.
+ *
+ * @coretags{task-unrestricted, might-switch, atomic-entry}
+ */
+int xnselect_bind(struct xnselect *select_block,
+		  struct xnselect_binding *binding,
+		  struct xnselector *selector,
+		  unsigned type,
+		  unsigned index,
+		  unsigned state)
+{
+	atomic_only();
+
+	if (type >= XNSELECT_MAX_TYPES || index > __FD_SETSIZE)
+		return -EINVAL;
+
+	binding->selector = selector;
+	binding->fd = select_block;
+	binding->type = type;
+	binding->bit_index = index;
+
+	list_add_tail(&binding->slink, &selector->bindings);
+	list_add_tail(&binding->link, &select_block->bindings);
+	__FD_SET__(index, &selector->fds[type].expected);
+	if (state) {
+		__FD_SET__(index, &selector->fds[type].pending);
+		if (xnselect_wakeup(selector))
+			xnsched_run();
+	} else
+		__FD_CLR__(index, &selector->fds[type].pending);
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(xnselect_bind);
+
+/* Must be called with nklock locked irqs off */
+int __xnselect_signal(struct xnselect *select_block, unsigned state)
+{
+	struct xnselect_binding *binding;
+	struct xnselector *selector;
+	int resched = 0;
+
+	list_for_each_entry(binding, &select_block->bindings, link) {
+		selector = binding->selector;
+		if (state) {
+			if (!__FD_ISSET__(binding->bit_index,
+					&selector->fds[binding->type].pending)) {
+				__FD_SET__(binding->bit_index,
+					 &selector->fds[binding->type].pending);
+				if (xnselect_wakeup(selector))
+					resched = 1;
+			}
+		} else
+			__FD_CLR__(binding->bit_index,
+				 &selector->fds[binding->type].pending);
+	}
+
+	return resched;
+}
+EXPORT_SYMBOL_GPL(__xnselect_signal);
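+
+/*
+ * Illustrative sketch (not part of the original sources): how a driver
+ * could expose the readiness of a receive queue to a selector. The
+ * binding is allocated outside the locked section as required by
+ * xnselect_bind(), and state changes are propagated under nklock with
+ * __xnselect_signal(). rx_block is assumed to have been initialized
+ * with xnselect_init(); rx_ready() is a hypothetical helper returning
+ * the current readiness state.
+ *
+ *	static struct xnselect rx_block;
+ *
+ *	int bind_rx(struct xnselector *selector, unsigned int index)
+ *	{
+ *		struct xnselect_binding *binding;
+ *		int ret;
+ *		spl_t s;
+ *
+ *		binding = xnmalloc(sizeof(*binding));
+ *		if (binding == NULL)
+ *			return -ENOMEM;
+ *
+ *		xnlock_get_irqsave(&nklock, s);
+ *		ret = xnselect_bind(&rx_block, binding, selector,
+ *				    XNSELECT_READ, index, rx_ready());
+ *		xnlock_put_irqrestore(&nklock, s);
+ *		if (ret)
+ *			xnfree(binding);
+ *
+ *		return ret;
+ *	}
+ *
+ *	void notify_rx(void)	// called when data arrives
+ *	{
+ *		spl_t s;
+ *
+ *		xnlock_get_irqsave(&nklock, s);
+ *		if (__xnselect_signal(&rx_block, 1))
+ *			xnsched_run();
+ *		xnlock_put_irqrestore(&nklock, s);
+ *	}
+ */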
+
+/**
+ * Destroy the @a xnselect structure associated with a file descriptor.
+ *
+ * Any binding with a @a xnselector block is destroyed.
+ *
+ * @param select_block pointer to the @a xnselect structure associated
+ * with a file descriptor
+ *
+ * @coretags{task-unrestricted, might-switch}
+ */
+void xnselect_destroy(struct xnselect *select_block)
+{
+	struct xnselect_binding *binding, *tmp;
+	struct xnselector *selector;
+	int resched = 0;
+	spl_t s;
+
+	xnlock_get_irqsave(&nklock, s);
+
+	if (list_empty(&select_block->bindings))
+		goto out;
+
+	list_for_each_entry_safe(binding, tmp, &select_block->bindings, link) {
+		list_del(&binding->link);
+		selector = binding->selector;
+		__FD_CLR__(binding->bit_index,
+			 &selector->fds[binding->type].expected);
+		if (!__FD_ISSET__(binding->bit_index,
+				&selector->fds[binding->type].pending)) {
+			__FD_SET__(binding->bit_index,
+				 &selector->fds[binding->type].pending);
+			if (xnselect_wakeup(selector))
+				resched = 1;
+		}
+		list_del(&binding->slink);
+		xnlock_put_irqrestore(&nklock, s);
+		xnfree(binding);
+		xnlock_get_irqsave(&nklock, s);
+	}
+	if (resched)
+		xnsched_run();
+out:
+	xnlock_put_irqrestore(&nklock, s);
+}
+EXPORT_SYMBOL_GPL(xnselect_destroy);
+
+static unsigned
+fd_set_andnot(fd_set *result, fd_set *first, fd_set *second, unsigned n)
+{
+	unsigned i, not_empty = 0;
+
+	for (i = 0; i < __FDELT__(n); i++)
+		if((result->fds_bits[i] =
+		    first->fds_bits[i] & ~(second->fds_bits[i])))
+			not_empty = 1;
+
+	if (i < __FDSET_LONGS__
+	    && (result->fds_bits[i] =
+		first->fds_bits[i] & ~(second->fds_bits[i]) & (__FDMASK__(n) - 1)))
+		not_empty = 1;
+
+	return not_empty;
+}
+
+static unsigned
+fd_set_and(fd_set *result, fd_set *first, fd_set *second, unsigned n)
+{
+	unsigned i, not_empty = 0;
+
+	for (i = 0; i < __FDELT__(n); i++)
+		if((result->fds_bits[i] =
+		    first->fds_bits[i] & second->fds_bits[i]))
+			not_empty = 1;
+
+	if (i < __FDSET_LONGS__
+	    && (result->fds_bits[i] =
+		first->fds_bits[i] & second->fds_bits[i] & (__FDMASK__(n) - 1)))
+		not_empty = 1;
+
+	return not_empty;
+}
+
+static void fd_set_zeropad(fd_set *set, unsigned n)
+{
+	unsigned i;
+
+	i = __FDELT__(n);
+
+	if (i < __FDSET_LONGS__)
+		set->fds_bits[i] &= (__FDMASK__(n) - 1);
+
+	for(i++; i < __FDSET_LONGS__; i++)
+		set->fds_bits[i] = 0;
+}
+
+static unsigned fd_set_popcount(fd_set *set, unsigned n)
+{
+	unsigned count = 0, i;
+
+	for (i = 0; i < __FDELT__(n); i++)
+		if (set->fds_bits[i])
+			count += hweight_long(set->fds_bits[i]);
+
+	if (i < __FDSET_LONGS__ && (set->fds_bits[i] & (__FDMASK__(n) - 1)))
+		count += hweight_long(set->fds_bits[i] & (__FDMASK__(n) - 1));
+
+	return count;
+}
+
+/**
+ * Initialize a selector structure.
+ *
+ * @param selector The selector structure to be initialized.
+ *
+ * @retval 0
+ *
+ * @coretags{task-unrestricted}
+ */
+int xnselector_init(struct xnselector *selector)
+{
+	unsigned int i;
+
+	xnsynch_init(&selector->synchbase, XNSYNCH_FIFO, NULL);
+	for (i = 0; i < XNSELECT_MAX_TYPES; i++) {
+		__FD_ZERO__(&selector->fds[i].expected);
+		__FD_ZERO__(&selector->fds[i].pending);
+	}
+	INIT_LIST_HEAD(&selector->bindings);
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(xnselector_init);
+
+/**
+ * Check the state of a number of file descriptors, waiting for a state
+ * change if no descriptor is ready.
+ *
+ * @param selector structure to check for pending events
+ * @param out_fds The set of descriptors with pending events if a
+ * strictly positive number is returned, or the set of descriptors not
+ * yet bound if -ECHRNG is returned;
+ * @param in_fds the set of descriptors whose events should be checked
+ * @param nfds the highest-numbered descriptor in any of the @a in_fds sets, plus 1;
+ * @param timeout the timeout, whose meaning depends on @a timeout_mode.
+ * Note that xnselect() passes @a timeout and @a timeout_mode unchanged
+ * to xnsynch_sleep_on(), so passing a relative value different from
+ * XN_INFINITE as a timeout with @a timeout_mode set to XN_RELATIVE
+ * will cause a longer sleep than expected if the sleep is interrupted.
+ * @param timeout_mode the mode of @a timeout.
+ *
+ * @retval -EINVAL if @a nfds is negative or greater than __FD_SETSIZE;
+ * @retval -ECHRNG if some of the descriptors passed in @a in_fds have not yet
+ * been registered with xnselect_bind(), @a out_fds contains the set of such
+ * descriptors;
+ * @retval -EINTR if xnselect() was interrupted while waiting;
+ * @retval 0 in case of timeout.
+ * @retval the number of file descriptors having received an event.
+ *
+ * @coretags{primary-only, might-switch}
+ */
+int xnselect(struct xnselector *selector,
+	     fd_set *out_fds[XNSELECT_MAX_TYPES],
+	     fd_set *in_fds[XNSELECT_MAX_TYPES],
+	     int nfds,
+	     xnticks_t timeout, xntmode_t timeout_mode)
+{
+	unsigned int i, not_empty = 0, count;
+	int info = 0;
+	spl_t s;
+
+	if ((unsigned) nfds > __FD_SETSIZE)
+		return -EINVAL;
+
+	for (i = 0; i < XNSELECT_MAX_TYPES; i++)
+		if (out_fds[i])
+			fd_set_zeropad(out_fds[i], nfds);
+
+	xnlock_get_irqsave(&nklock, s);
+	for (i = 0; i < XNSELECT_MAX_TYPES; i++)
+		if (out_fds[i]
+		    && fd_set_andnot(out_fds[i], in_fds[i],
+				     &selector->fds[i].expected, nfds))
+			not_empty = 1;
+	xnlock_put_irqrestore(&nklock, s);
+
+	if (not_empty)
+		return -ECHRNG;
+
+	xnlock_get_irqsave(&nklock, s);
+	for (i = 0; i < XNSELECT_MAX_TYPES; i++)
+		if (out_fds[i]
+		    && fd_set_and(out_fds[i], in_fds[i],
+				  &selector->fds[i].pending, nfds))
+			not_empty = 1;
+
+	while (!not_empty) {
+		info = xnsynch_sleep_on(&selector->synchbase,
+					timeout, timeout_mode);
+
+		for (i = 0; i < XNSELECT_MAX_TYPES; i++)
+			if (out_fds[i]
+			    && fd_set_and(out_fds[i], in_fds[i],
+					  &selector->fds[i].pending, nfds))
+				not_empty = 1;
+
+		if (info & (XNBREAK | XNTIMEO))
+			break;
+	}
+	xnlock_put_irqrestore(&nklock, s);
+
+	if (not_empty) {
+		for (count = 0, i = 0; i < XNSELECT_MAX_TYPES; i++)
+			if (out_fds[i])
+				count += fd_set_popcount(out_fds[i], nfds);
+
+		return count;
+	}
+
+	if (info & XNBREAK)
+		return -EINTR;
+
+	return 0; /* Timeout */
+}
+EXPORT_SYMBOL_GPL(xnselect);
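+
+/*
+ * Illustrative sketch (not part of the original sources): waiting for
+ * read readiness on a set of descriptors previously bound to the
+ * selector with xnselect_bind(). The selector is assumed to have been
+ * initialized with xnselector_init().
+ *
+ *	int wait_readable(struct xnselector *selector, int nfds,
+ *			  xnticks_t timeout)
+ *	{
+ *		fd_set *in[XNSELECT_MAX_TYPES] = { NULL };
+ *		fd_set *out[XNSELECT_MAX_TYPES] = { NULL };
+ *		fd_set rfds, out_rfds;
+ *		int fd;
+ *
+ *		__FD_ZERO__(&rfds);
+ *		for (fd = 0; fd < nfds; fd++)
+ *			__FD_SET__(fd, &rfds);
+ *
+ *		in[XNSELECT_READ] = &rfds;
+ *		out[XNSELECT_READ] = &out_rfds;
+ *
+ *		return xnselect(selector, out, in, nfds,
+ *				timeout, XN_RELATIVE);
+ *	}
+ */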
+
+/**
+ * Destroy a selector block.
+ *
+ * All bindings with file descriptor are destroyed.
+ *
+ * @param selector the selector block to be destroyed
+ *
+ * @coretags{task-unrestricted}
+ */
+void xnselector_destroy(struct xnselector *selector)
+{
+	spl_t s;
+
+	xnlock_get_irqsave(&nklock, s);
+	list_add_tail(&selector->destroy_link, &selector_list);
+	pipeline_post_sirq(deletion_virq);
+	xnlock_put_irqrestore(&nklock, s);
+}
+EXPORT_SYMBOL_GPL(xnselector_destroy);
+
+static irqreturn_t xnselector_destroy_loop(int virq, void *dev_id)
+{
+	struct xnselect_binding *binding, *tmpb;
+	struct xnselector *selector, *tmps;
+	struct xnselect *fd;
+	spl_t s;
+
+	xnlock_get_irqsave(&nklock, s);
+
+	if (list_empty(&selector_list))
+		goto out;
+
+	list_for_each_entry_safe(selector, tmps, &selector_list, destroy_link) {
+		list_del(&selector->destroy_link);
+		if (list_empty(&selector->bindings))
+			goto release;
+		list_for_each_entry_safe(binding, tmpb, &selector->bindings, slink) {
+			list_del(&binding->slink);
+			fd = binding->fd;
+			list_del(&binding->link);
+			xnlock_put_irqrestore(&nklock, s);
+			xnfree(binding);
+			xnlock_get_irqsave(&nklock, s);
+		}
+	release:
+		xnsynch_destroy(&selector->synchbase);
+		xnsched_run();
+		xnlock_put_irqrestore(&nklock, s);
+
+		xnfree(selector);
+
+		xnlock_get_irqsave(&nklock, s);
+	}
+out:
+	xnlock_put_irqrestore(&nklock, s);
+
+	return IRQ_HANDLED;
+}
+
+int xnselect_mount(void)
+{
+	deletion_virq = pipeline_create_inband_sirq(xnselector_destroy_loop);
+	if (deletion_virq < 0)
+		return deletion_virq;
+
+	return 0;
+}
+
+int xnselect_umount(void)
+{
+	pipeline_delete_inband_sirq(deletion_virq);
+	return 0;
+}
+
+/** @} */
+++ linux-patched/kernel/xenomai/debug.h	2022-03-21 12:58:28.854894178 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/kernel/xenomai/procfs.h	1970-01-01 01:00:00.000000000 +0100
+/*
+ * Copyright (C) 2010 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * Xenomai is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published
+ * by the Free Software Foundation; either version 2 of the License,
+ * or (at your option) any later version.
+ *
+ * Xenomai is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Xenomai; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA
+ * 02111-1307, USA.
+ */
+
+#ifndef _KERNEL_COBALT_DEBUG_H
+#define _KERNEL_COBALT_DEBUG_H
+
+#include <cobalt/kernel/assert.h>
+
+struct xnthread;
+
+#ifdef CONFIG_XENO_OPT_DEBUG
+
+int xndebug_init(void);
+
+void xndebug_cleanup(void);
+
+void xndebug_shadow_init(struct xnthread *thread);
+
+extern struct xnvfile_directory cobalt_debug_vfroot;
+
+#else  /* !XENO_OPT_DEBUG */
+
+static inline int xndebug_init(void)
+{
+	return 0;
+}
+
+static inline void xndebug_cleanup(void)
+{
+}
+
+static inline void xndebug_shadow_init(struct xnthread *thread)
+{
+}
+
+#endif  /* !XENO_OPT_DEBUG */
+
+#ifdef CONFIG_XENO_OPT_DEBUG_TRACE_RELAX
+void xndebug_notify_relax(struct xnthread *thread,
+			  int reason);
+void xndebug_trace_relax(int nr, unsigned long *backtrace,
+			 int reason);
+#else
+static inline
+void xndebug_notify_relax(struct xnthread *thread, int reason)
+{
+}
+static inline
+void xndebug_trace_relax(int nr, unsigned long *backtrace,
+			 int reason)
+{
+	/* Simply ignore. */
+}
+#endif
+
+#endif /* !_KERNEL_COBALT_DEBUG_H */
+++ linux-patched/kernel/xenomai/procfs.h	2022-03-21 12:58:28.850894217 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/kernel/xenomai/Kconfig	1970-01-01 01:00:00.000000000 +0100
+/*
+ * Copyright (C) 2013 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * Xenomai is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published
+ * by the Free Software Foundation; either version 2 of the License,
+ * or (at your option) any later version.
+ *
+ * Xenomai is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Xenomai; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA
+ * 02111-1307, USA.
+ */
+#ifndef _KERNEL_COBALT_PROCFS_H
+#define _KERNEL_COBALT_PROCFS_H
+
+#ifdef CONFIG_XENO_OPT_VFILE
+int xnprocfs_init_tree(void);
+void xnprocfs_cleanup_tree(void);
+#else
+static inline int xnprocfs_init_tree(void) { return 0; }
+static inline void xnprocfs_cleanup_tree(void) { }
+#endif /* !CONFIG_XENO_OPT_VFILE */
+
+#endif /* !_KERNEL_COBALT_PROCFS_H */
+++ linux-patched/kernel/xenomai/Kconfig	2022-03-21 12:58:28.847894247 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/kernel/xenomai/heap.c	1970-01-01 01:00:00.000000000 +0100
+menu "Core features"
+
+config XENO_OPT_SCHED_CLASSES
+	bool "Extra scheduling classes"
+	default n
+	help
+	The Cobalt kernel implements a set of scheduling classes.
+	Each scheduling class defines its own set of rules for
+	determining when and how to select a new thread to run.
+
+	Cobalt has a built-in real-time class, which supports both
+	preemptive fixed-priority FIFO, and round-robin scheduling.
+
+	Enabling CONFIG_XENO_OPT_SCHED_CLASSES allows you to select
+	additional scheduling classes to enable in the Cobalt kernel.
+
+	If in doubt, say N.
+
+config XENO_OPT_SCHED_WEAK
+	bool "Weak scheduling class"
+	default n
+	depends on XENO_OPT_SCHED_CLASSES
+	help
+	This option creates a Cobalt scheduling class for mapping
+	members of the regular POSIX SCHED_FIFO/RR policies to a low
+	priority class of the Cobalt kernel, providing no real-time
+	guarantee. Up to a hundred non real-time priority levels are
+	available from the SCHED_WEAK policy.
+
+	When CONFIG_XENO_OPT_SCHED_WEAK is disabled, Cobalt still
+	supports a single non real-time priority level (i.e. zero
+	priority), assigned to members of the SCHED_OTHER class.
+
+	SCHED_WEAK/SCHED_OTHER threads can access Cobalt resources,
+	wait on Cobalt synchronization objects, but cannot compete for
+	the CPU with members of the real-time Cobalt classes.
+
+	Since Cobalt assumes no real-time requirement for
+	SCHED_WEAK/SCHED_OTHER threads, they are automatically moved
+	back to secondary mode upon return from any Cobalt syscall if
+	necessary, unless they hold a Cobalt mutex, which would defer
+	the transition until such mutex is released.
+
+	If in doubt, say N.
+
+config XENO_OPT_SCHED_TP
+	bool "Temporal partitioning"
+	default n
+	depends on XENO_OPT_SCHED_CLASSES
+	help
+	This option enables support for temporal partitioning.
+
+	If in doubt, say N.
+
+config XENO_OPT_SCHED_TP_NRPART
+	int "Number of partitions"
+	default 4
+	range 1 1024
+	depends on XENO_OPT_SCHED_TP
+	help
+	Define here the maximum number of temporal partitions the TP
+	scheduler may have to handle.
+
+config XENO_OPT_SCHED_SPORADIC
+	bool "Sporadic scheduling"
+	default n
+	depends on XENO_OPT_SCHED_CLASSES
+	help
+	This option enables support for the sporadic scheduling policy
+	in the Cobalt kernel (SCHED_SPORADIC), also known as POSIX
+	sporadic server.
+
+	It can be used to enforce a capped limit on the execution time
+	of a thread within a given period of time.
+
+	If in doubt, say N.
+
+config XENO_OPT_SCHED_SPORADIC_MAXREPL
+	int "Maximum number of pending replenishments"
+	default 8
+	range 4 16
+	depends on XENO_OPT_SCHED_SPORADIC
+	help
+	For performance reasons, the budget replenishment information
+	is statically stored on a per-thread basis. This parameter
+	defines the maximum number of replenishment requests that can
+	be pending concurrently for any given thread that undergoes
+	sporadic scheduling (system minimum is 4).
+
+config XENO_OPT_SCHED_QUOTA
+	bool "Thread groups with runtime quota"
+	default n
+	depends on XENO_OPT_SCHED_CLASSES
+	help
+	This option enables the SCHED_QUOTA scheduling policy in the
+	Cobalt kernel.
+
+	This policy enforces a limitation on the CPU consumption of
+	threads over a globally defined period, known as the quota
+	interval. This is done by pooling threads with common
+	requirements in groups, and giving each group a share of the
+	global period (see CONFIG_XENO_OPT_SCHED_QUOTA_PERIOD).
+
+	When threads have entirely consumed the quota allotted to the
+	group they belong to, the latter is suspended as a whole,
+	until the next quota interval starts. At this point, a new
+	runtime budget is given to each group, in accordance with its
+	share.
+
+	If in doubt, say N.
+
+config XENO_OPT_SCHED_QUOTA_PERIOD
+	int "Quota interval (us)"
+	default 10000
+	range 100 1000000000
+	depends on XENO_OPT_SCHED_QUOTA
+	help
+	The global period thread groups can get a share of.
+
+config XENO_OPT_SCHED_QUOTA_NR_GROUPS
+	int "Number of thread groups"
+	default 32
+	range 1 1024
+	depends on XENO_OPT_SCHED_QUOTA
+	help
+	The overall number of thread groups which may be defined
+	across all CPUs.
+
+config XENO_OPT_STATS
+	bool "Runtime statistics"
+	depends on XENO_OPT_VFILE
+	default y
+	help
+	This option causes the Cobalt kernel to collect various
+	per-thread runtime statistics, which are accessible through
+	the /proc/xenomai/sched/stat interface.
+
+config XENO_OPT_STATS_IRQS
+	bool "Account IRQ handlers separatly"
+	depends on XENO_OPT_STATS && IPIPE
+	default y
+	help
+	When enabled, the runtime of interrupt handlers is accounted
+	separately from the threads they interrupt. Also, the
+	occurrence of shared interrupts is accounted on a per-handler
+	basis.
+
+	This option is available to legacy I-pipe builds only.
+
+config XENO_OPT_SHIRQ
+	bool "Shared interrupts"
+	help
+	Enables support for both level- and edge-triggered shared
+	interrupts, so that multiple real-time interrupt handlers
+	are allowed to control dedicated hardware devices which are
+	configured to share the same interrupt line.
+
+config XENO_OPT_RR_QUANTUM
+	int "Round-robin quantum (us)"
+	default 1000
+	help
+	This parameter defines the duration of the default round-robin
+	time quantum expressed as a count of microseconds. This value
+	may be overridden internally by Cobalt services which provide
+	their own round-robin interval.
+
+config XENO_OPT_AUTOTUNE
+	tristate "Auto-tuning"
+	default y
+	select XENO_DRIVERS_AUTOTUNE
+	help
+	Enable auto-tuning capabilities. Auto-tuning is used for
+	adjusting the core timing services to the intrinsic latency of
+	the platform.
+
+config XENO_OPT_SCALABLE_SCHED
+	bool "O(1) scheduler"
+	help
+	This option causes a multi-level priority queue to be used in
+	the real-time scheduler, so that it operates in constant-time
+	regardless of the number of _concurrently runnable_ threads
+	(which might be much lower than the total number of active
+	threads).
+
+	Its use is recommended for large multi-threaded systems
+	involving more than 10 such threads; otherwise, the default
+	linear method usually performs better and has a lower memory
+	footprint.
+
+choice
+	prompt "Timer indexing method"
+	default XENO_OPT_TIMER_LIST if !X86_64
+	default XENO_OPT_TIMER_RBTREE if X86_64
+	help
+	This option allows you to select the underlying data structure
+	which is going to be used for ordering the outstanding
+	software timers managed by the Cobalt kernel.
+
+config XENO_OPT_TIMER_LIST
+	bool "Linear"
+	help
+	Use a linked list. Albeit O(N), this simple data structure is
+	particularly efficient when only a few timers (< 10) may be
+	concurrently outstanding at any point in time.
+
+config XENO_OPT_TIMER_RBTREE
+	bool "Tree"
+	help
+	Use a red-black tree. This data structure is efficient when a
+	high number of software timers may be concurrently
+	outstanding at any point in time.
+
+endchoice
+
+config XENO_OPT_PIPE
+	bool
+
+config XENO_OPT_MAP
+	bool
+
+config XENO_OPT_EXTCLOCK
+       bool
+
+config XENO_OPT_COBALT_EXTENSION
+       bool
+
+config XENO_OPT_VFILE
+       bool
+       depends on PROC_FS
+       default y
+
+endmenu
+
+menu "Sizes and static limits"
+
+config XENO_OPT_PIPE_NRDEV
+	int "Number of pipe devices"
+	depends on XENO_OPT_PIPE
+	default 32
+	help
+	Message pipes are bi-directional FIFO communication channels
+	allowing data exchange between Cobalt threads and regular
+	POSIX threads. Pipes natively preserve message boundaries, but
+	can also be used in byte streaming mode from kernel to
+	user-space.
+
+	This option sets the maximum number of pipe devices supported
+	in the system. Pipe devices are named /dev/rtpN where N is a
+	device minor number ranging from 0 to XENO_OPT_PIPE_NRDEV - 1.
+
+config XENO_OPT_REGISTRY_NRSLOTS
+	int "Number of registry slots"
+	default 512
+	help
+	The registry is used by the Cobalt kernel to export named
+	resources to user-space programs via the /proc interface.
+	Each named resource occupies a registry slot. This option sets
+	the maximum number of resources the registry can handle.
+
+config XENO_OPT_SYS_HEAPSZ
+	int "Size of system heap (Kb)"
+	default 4096
+	help
+	The system heap is used for various internal allocations by
+	the Cobalt kernel. The size is expressed in Kilobytes.
+
+config XENO_OPT_PRIVATE_HEAPSZ
+	int "Size of private heap (Kb)"
+	default 256
+	help
+	The Cobalt kernel implements fast IPC mechanisms within the
+	scope of a process which require a private kernel memory heap
+	to be mapped in the address space of each Xenomai application
+	process. This option can be used to set the size of this
+	per-process heap.
+
+	64k is considered a large enough size for common use cases.
+
+config XENO_OPT_SHARED_HEAPSZ
+	int "Size of shared heap (Kb)"
+	default 256
+	help
+	The Cobalt kernel implements fast IPC mechanisms between
+	processes which require a shared kernel memory heap to be
+	mapped in the address space of all Xenomai application
+	processes. This option can be used to set the size of this
+	system-wide heap.
+
+	64k is considered a large enough size for common use cases.
+
+config XENO_OPT_NRTIMERS
+       int "Maximum number of POSIX timers per process"
+       default 256
+       help
+       This tunable controls how many POSIX timers can exist at any
+       given time for each Cobalt process (a timer is created by a
+       call to the timer_create() service of the Cobalt/POSIX API).
+
+config XENO_OPT_DEBUG_TRACE_LOGSZ
+       int "Trace log size"
+       depends on XENO_OPT_DEBUG_TRACE_RELAX
+       default 16
+       help
+       The size (kilobytes) of the trace log of relax requests. Once
+       this limit is reached, subsequent traces will be silently
+       discarded.
+
+       Writing to /proc/xenomai/debug/relax empties the trace log.
+
+endmenu
+
+menu "Latency settings"
+
+config XENO_OPT_TIMING_SCHEDLAT
+	int "User scheduling latency (ns)"
+	default 0
+	help
+	The user scheduling latency is the time between the
+	termination of an interrupt handler and the execution of the
+	first instruction of the real-time application thread this
+	handler resumes. A default value of 0 (recommended) will cause
+	a pre-calibrated value to be used.
+
+	If the auto-tuner is enabled, this value will be used as the
+	factory default when running "autotune --reset".
+
+config XENO_OPT_TIMING_KSCHEDLAT
+	int "Intra-kernel scheduling latency (ns)"
+	default 0
+	help
+	The intra-kernel scheduling latency is the time between the
+	termination of an interrupt handler and the execution of the
+	first instruction of the RTDM kernel thread this handler
+	resumes. A default value of 0 (recommended) will cause a
+	pre-calibrated value to be used.
+
+	Intra-kernel latency is usually significantly lower than user
+	scheduling latency on MMU-enabled platforms, due to CPU cache
+	latency.
+
+	If the auto-tuner is enabled, this value will be used as the
+	factory default when running "autotune --reset".
+
+config XENO_OPT_TIMING_IRQLAT
+	int "Interrupt latency (ns)"
+	default 0
+	help
+	The interrupt latency is the time between the occurrence of an
+	IRQ and the first instruction of the interrupt handler which
+	will service it. A default value of 0 (recommended) will cause
+	a pre-calibrated value to be used.
+
+	If the auto-tuner is enabled, this value will be used as the
+	factory default when running "autotune --reset".
+
+endmenu
+
+menuconfig XENO_OPT_DEBUG
+	depends on XENO_OPT_VFILE
+	bool "Debug support"
+	help
+	  When enabled, various debugging features can be switched
+	  on. They can help to find problems in applications, drivers,
+	  and the Cobalt kernel. XENO_OPT_DEBUG by itself does not have
+	  any impact on the generated code.
+
+if XENO_OPT_DEBUG
+
+config XENO_OPT_DEBUG_COBALT
+	bool "Cobalt runtime assertions"
+	help
+	  This option activates various assertions inside the Cobalt
+	  kernel. This option has limited overhead.
+
+config XENO_OPT_DEBUG_MEMORY
+	bool "Cobalt memory checks"
+	help
+	  This option enables memory debug checks inside the Cobalt
+	  kernel. This option may induce significant overhead with large
+	  heaps.
+
+config XENO_OPT_DEBUG_CONTEXT
+       bool "Check for calling context"
+       help
+         This option enables checks for the calling context in the
+         Cobalt kernel, aimed at detecting when regular Linux routines
+         are entered from a real-time context, and conversely.
+
+config XENO_OPT_DEBUG_LOCKING
+	bool "Spinlock debugging support"
+	default y if SMP
+	help
+	  This option activates runtime assertions, and measurements
+	  of spinlock spinning time and duration in the Cobalt
+	  kernel. It helps find latency spots due to
+	  interrupt-masked sections. Statistics about the longest
+	  masked section can be found in /proc/xenomai/debug/lock.
+
+	  This option may induce a measurable overhead on low end
+	  machines.
+
+config XENO_OPT_DEBUG_USER
+	bool "User consistency checks"
+	help
+	  This option enables a set of consistency checks for
+	  detecting wrong runtime behavior in user applications.
+
+	  With some of the debug categories, threads can ask for
+	  notification when a problem is detected, by turning on the
+	  PTHREAD_WARNSW mode bit with pthread_setmode_np().  Cobalt
+	  sends the Linux-originated SIGDEBUG signal for notifying
+	  threads, along with a reason code passed into the associated
+	  siginfo data (see pthread_setmode_np()).
+	
+	  Some of these runtime checks may induce overhead, enable
+	  them for debugging purposes only.
+
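+# Note (illustrative only; the exact call shown is an assumption, the
+# option help above only names the service): from user-space, a thread
+# typically requests the SIGDEBUG notifications described in
+# XENO_OPT_DEBUG_USER with a call such as
+# pthread_setmode_np(0, PTHREAD_WARNSW, NULL), and clears the mode bit
+# again with pthread_setmode_np(PTHREAD_WARNSW, 0, NULL).
+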
+if XENO_OPT_DEBUG_USER
+
+config XENO_OPT_DEBUG_MUTEX_RELAXED
+       bool "Detect relaxed mutex owner"
+       default y
+       help
+         A thread which attempts to acquire a mutex currently owned by
+         another thread running in secondary/relaxed mode will suffer
+         unwanted latencies, due to a priority inversion. If debug
+         notifications are enabled for such a thread, it receives a
+         SIGDEBUG signal.
+
+	 This option has some overhead in real-time mode over
+	 contended mutexes.
+ 
+config XENO_OPT_DEBUG_MUTEX_SLEEP
+       bool "Detect sleeping with mutex"
+       default y
+       help
+         A thread which goes to sleep while holding a mutex is prone
+         to cause unwanted latencies to other threads serialized by
+         the same lock. If debug notifications are enabled for such a
+         thread, it receives a SIGDEBUG signal right before entering
+         sleep.
+
+	 This option has noticeable overhead in real-time mode as it
+	 disables the normal fast mutex operations from user-space,
+	 causing a system call for each mutex acquisition/release.
+
+config XENO_OPT_DEBUG_LEGACY
+        bool "Detect usage of legacy constructs/features"
+	default n
+	help
+	    Turns on detection of legacy API usage.
+
+endif # XENO_OPT_DEBUG_USER
+
+config XENO_OPT_DEBUG_TRACE_RELAX
+	bool "Trace relax requests"
+	default n
+	help
+	  This option enables recording of unwanted relax requests from
+	  user-space applications leaving the real-time domain, logging
+	  the thread information and code location involved. All records
+	  are readable from /proc/xenomai/debug/relax, and can be
+	  decoded using the "slackspot" utility.
+
+config XENO_OPT_WATCHDOG
+	bool "Watchdog support"
+	default y
+	help
+	  This option activates a watchdog aimed at detecting runaway
+	  Cobalt threads. If enabled, the watchdog triggers after a
+	  given period of uninterrupted real-time activity has elapsed
+	  without Linux interaction in the meantime.
+
+	  In such an event, the current thread is moved out of the
+	  real-time domain, receiving a SIGDEBUG signal from the Linux
+	  kernel immediately after.
+
+	  The timeout value of the watchdog can be set using the
+	  XENO_OPT_WATCHDOG_TIMEOUT parameter.
+
+config XENO_OPT_WATCHDOG_TIMEOUT
+	depends on XENO_OPT_WATCHDOG
+	int "Watchdog timeout"
+	default 4
+	range 1 60
+	help
+	  Watchdog timeout value (in seconds).
+
+endif # XENO_OPT_DEBUG
+
+config XENO_TODO
+	bool "Reveal TODO places"
+	help
+	  This option causes a build time assertion to trigger
+	  when the TODO() marker is found in the compiled code.
+++ linux-patched/kernel/xenomai/heap.c	2022-03-21 12:58:28.843894286 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/kernel/xenomai/sched-idle.c	1970-01-01 01:00:00.000000000 +0100
+/*
+ * Copyright (C) 2001,2002,2003 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * Xenomai is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published
+ * by the Free Software Foundation; either version 2 of the License,
+ * or (at your option) any later version.
+ *
+ * Xenomai is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Xenomai; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA
+ * 02111-1307, USA.
+ */
+#include <linux/stdarg.h>
+#include <linux/vmalloc.h>
+#include <linux/slab.h>
+#include <linux/kernel.h>
+#include <linux/log2.h>
+#include <linux/bitops.h>
+#include <linux/mm.h>
+#include <asm/pgtable.h>
+#include <cobalt/kernel/assert.h>
+#include <cobalt/kernel/heap.h>
+#include <cobalt/kernel/vfile.h>
+#include <cobalt/kernel/ancillaries.h>
+#include <asm/xenomai/wrappers.h>
+
+/**
+ * @ingroup cobalt_core
+ * @defgroup cobalt_core_heap Dynamic memory allocation services
+ *
+ * This code implements a variant of the allocator described in
+ * "Design of a General Purpose Memory Allocator for the 4.3BSD Unix
+ * Kernel" by Marshall K. McKusick and Michael J. Karels (USENIX
+ * 1988), see http://docs.FreeBSD.org/44doc/papers/kernmalloc.pdf.
+ * The free page list is maintained in rbtrees for fast lookups of
+ * multi-page memory ranges, and pages holding bucketed memory have a
+ * fast allocation bitmap to manage their blocks internally.
+ *@{
+ */
+struct xnheap cobalt_heap;		/* System heap */
+EXPORT_SYMBOL_GPL(cobalt_heap);
+
+static LIST_HEAD(heapq);	/* Heap list for v-file dump */
+
+static int nrheaps;
+
+#ifdef CONFIG_XENO_OPT_VFILE
+
+static struct xnvfile_rev_tag vfile_tag;
+
+static struct xnvfile_snapshot_ops vfile_ops;
+
+struct vfile_priv {
+	struct xnheap *curr;
+};
+
+struct vfile_data {
+	size_t all_mem;
+	size_t free_mem;
+	char name[XNOBJECT_NAME_LEN];
+};
+
+static struct xnvfile_snapshot vfile = {
+	.privsz = sizeof(struct vfile_priv),
+	.datasz = sizeof(struct vfile_data),
+	.tag = &vfile_tag,
+	.ops = &vfile_ops,
+};
+
+static int vfile_rewind(struct xnvfile_snapshot_iterator *it)
+{
+	struct vfile_priv *priv = xnvfile_iterator_priv(it);
+
+	if (list_empty(&heapq)) {
+		priv->curr = NULL;
+		return 0;
+	}
+
+	priv->curr = list_first_entry(&heapq, struct xnheap, next);
+
+	return nrheaps;
+}
+
+static int vfile_next(struct xnvfile_snapshot_iterator *it, void *data)
+{
+	struct vfile_priv *priv = xnvfile_iterator_priv(it);
+	struct vfile_data *p = data;
+	struct xnheap *heap;
+
+	if (priv->curr == NULL)
+		return 0;	/* We are done. */
+
+	heap = priv->curr;
+	if (list_is_last(&heap->next, &heapq))
+		priv->curr = NULL;
+	else
+		priv->curr = list_entry(heap->next.next,
+					struct xnheap, next);
+
+	p->all_mem = xnheap_get_size(heap);
+	p->free_mem = xnheap_get_free(heap);
+	knamecpy(p->name, heap->name);
+
+	return 1;
+}
+
+static int vfile_show(struct xnvfile_snapshot_iterator *it, void *data)
+{
+	struct vfile_data *p = data;
+
+	if (p == NULL)
+		xnvfile_printf(it, "%9s %9s  %s\n",
+			       "TOTAL", "FREE", "NAME");
+	else
+		xnvfile_printf(it, "%9zu %9zu  %s\n",
+			       p->all_mem,
+			       p->free_mem,
+			       p->name);
+	return 0;
+}
+
+static struct xnvfile_snapshot_ops vfile_ops = {
+	.rewind = vfile_rewind,
+	.next = vfile_next,
+	.show = vfile_show,
+};
+
+void xnheap_init_proc(void)
+{
+	xnvfile_init_snapshot("heap", &vfile, &cobalt_vfroot);
+}
+
+void xnheap_cleanup_proc(void)
+{
+	xnvfile_destroy_snapshot(&vfile);
+}
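+
+/*
+ * Illustrative output (heap names and figures are made up): with the
+ * vfile handlers above, reading /proc/xenomai/heap yields one line
+ * per registered heap, e.g.:
+ *
+ *	    TOTAL      FREE  NAME
+ *	  4194304   4100096  system heap
+ *	   262144    261632  some-process-heap
+ */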
+
+#endif /* CONFIG_XENO_OPT_VFILE */
+
+enum xnheap_pgtype {
+	page_free = 0,
+	page_cont = 1,
+	page_list = 2
+};
+
+static inline u32 __always_inline
+gen_block_mask(int log2size)
+{
+	return -1U >> (32 - (XNHEAP_PAGE_SIZE >> log2size));
+}
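+
+/*
+ * Worked example (illustrative, assuming the usual 512-byte heap
+ * page, i.e. XNHEAP_PAGE_SHIFT == 9 as defined in the heap header):
+ * a page split into 32-byte blocks (log2size == 5) holds
+ * 512 >> 5 == 16 blocks, so gen_block_mask(5) == -1U >> (32 - 16)
+ * == 0x0000ffff, i.e. one usable bit per block in the per-page
+ * allocation bitmap. With 16-byte blocks (log2size == 4), all 32
+ * bits are usable and the mask is 0xffffffff.
+ */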
+
+static inline  __always_inline
+int addr_to_pagenr(struct xnheap *heap, void *p)
+{
+	return ((void *)p - heap->membase) >> XNHEAP_PAGE_SHIFT;
+}
+
+static inline  __always_inline
+void *pagenr_to_addr(struct xnheap *heap, int pg)
+{
+	return heap->membase + (pg << XNHEAP_PAGE_SHIFT);
+}
+
+#ifdef CONFIG_XENO_OPT_DEBUG_MEMORY
+/*
+ * Setting page_cont/page_free in the page map is only required for
+ * enabling full checking of the block address in free requests, which
+ * may be extremely time-consuming when deallocating huge blocks
+ * spanning thousands of pages. We only do such marking when running
+ * in memory debug mode.
+ */
+static inline bool
+page_is_valid(struct xnheap *heap, int pg)
+{
+	switch (heap->pagemap[pg].type) {
+	case page_free:
+	case page_cont:
+		return false;
+	case page_list:
+	default:
+		return true;
+	}
+}
+
+static void mark_pages(struct xnheap *heap,
+		       int pg, int nrpages,
+		       enum xnheap_pgtype type)
+{
+	while (nrpages-- > 0)
+		heap->pagemap[pg].type = type;
+}
+
+#else
+
+static inline bool
+page_is_valid(struct xnheap *heap, int pg)
+{
+	return true;
+}
+
+static void mark_pages(struct xnheap *heap,
+		       int pg, int nrpages,
+		       enum xnheap_pgtype type)
+{ }
+
+#endif
+
+static struct xnheap_range *
+search_size_ge(struct rb_root *t, size_t size)
+{
+	struct rb_node *rb, *deepest = NULL;
+	struct xnheap_range *r;
+	
+	/*
+	 * We first try to find an exact match. If that fails, we walk
+	 * the tree in logical order by increasing size value from the
+	 * deepest node traversed until we find the first successor to
+	 * that node, or nothing beyond it, whichever comes first.
+	 */
+	rb = t->rb_node;
+	while (rb) {
+		deepest = rb;
+		r = rb_entry(rb, struct xnheap_range, size_node);
+		if (size < r->size) {
+			rb = rb->rb_left;
+			continue;
+		}
+		if (size > r->size) {
+			rb = rb->rb_right;
+			continue;
+		}
+		return r;
+	}
+
+	rb = deepest;
+	while (rb) {
+		r = rb_entry(rb, struct xnheap_range, size_node);
+		if (size <= r->size)
+			return r;
+		rb = rb_next(rb);
+	}
+
+	return NULL;
+}
+
+static struct xnheap_range *
+search_left_mergeable(struct xnheap *heap, struct xnheap_range *r)
+{
+  	struct rb_node *node = heap->addr_tree.rb_node;
+	struct xnheap_range *p;
+
+  	while (node) {
+		p = rb_entry(node, struct xnheap_range, addr_node);
+		if ((void *)p + p->size == (void *)r)
+			return p;
+		if (&r->addr_node < node)
+  			node = node->rb_left;
+		else
+  			node = node->rb_right;
+	}
+
+	return NULL;
+}
+
+static struct xnheap_range *
+search_right_mergeable(struct xnheap *heap, struct xnheap_range *r)
+{
+  	struct rb_node *node = heap->addr_tree.rb_node;
+	struct xnheap_range *p;
+
+  	while (node) {
+		p = rb_entry(node, struct xnheap_range, addr_node);
+		if ((void *)r + r->size == (void *)p)
+			return p;
+		if (&r->addr_node < node)
+  			node = node->rb_left;
+		else
+  			node = node->rb_right;
+	}
+
+	return NULL;
+}
+
+static void insert_range_bysize(struct xnheap *heap, struct xnheap_range *r)
+{
+  	struct rb_node **new = &heap->size_tree.rb_node, *parent = NULL;
+	struct xnheap_range *p;
+
+  	while (*new) {
+  		p = container_of(*new, struct xnheap_range, size_node);
+		parent = *new;
+  		if (r->size <= p->size)
+  			new = &((*new)->rb_left);
+  		else
+  			new = &((*new)->rb_right);
+  	}
+
+  	rb_link_node(&r->size_node, parent, new);
+  	rb_insert_color(&r->size_node, &heap->size_tree);
+}
+
+static void insert_range_byaddr(struct xnheap *heap, struct xnheap_range *r)
+{
+  	struct rb_node **new = &heap->addr_tree.rb_node, *parent = NULL;
+	struct xnheap_range *p;
+
+  	while (*new) {
+  		p = container_of(*new, struct xnheap_range, addr_node);
+		parent = *new;
+  		if (r < p)
+  			new = &((*new)->rb_left);
+  		else
+  			new = &((*new)->rb_right);
+  	}
+
+  	rb_link_node(&r->addr_node, parent, new);
+  	rb_insert_color(&r->addr_node, &heap->addr_tree);
+}
+
+static int reserve_page_range(struct xnheap *heap, size_t size)
+{
+	struct xnheap_range *new, *splitr;
+
+	/* Find a suitable range of pages covering 'size'. */
+	new = search_size_ge(&heap->size_tree, size);
+	if (new == NULL)
+		return -1;
+
+	rb_erase(&new->size_node, &heap->size_tree);
+	if (new->size == size) {
+		rb_erase(&new->addr_node, &heap->addr_tree);
+		return addr_to_pagenr(heap, new);
+	}
+
+	/*
+	 * The free range fetched is larger than what we need: split
+	 * it in two, the upper part is returned to the caller, the
+	 * lower part is sent back to the free list, which makes
+	 * reindexing by address pointless.
+	 */
+	splitr = new;
+	splitr->size -= size;
+	new = (struct xnheap_range *)((void *)new + splitr->size);
+	insert_range_bysize(heap, splitr);
+
+	return addr_to_pagenr(heap, new);
+}
+
+static void release_page_range(struct xnheap *heap,
+			       void *page, size_t size)
+{
+	struct xnheap_range *freed = page, *left, *right;
+	bool addr_linked = false;
+
+	freed->size = size;
+
+	left = search_left_mergeable(heap, freed);
+	if (left) {
+		rb_erase(&left->size_node, &heap->size_tree);
+		left->size += freed->size;
+		freed = left;
+		addr_linked = true;
+	}
+
+	right = search_right_mergeable(heap, freed);
+	if (right) {
+		rb_erase(&right->size_node, &heap->size_tree);
+		freed->size += right->size;
+		if (addr_linked)
+			rb_erase(&right->addr_node, &heap->addr_tree);
+		else
+			rb_replace_node(&right->addr_node, &freed->addr_node,
+					&heap->addr_tree);
+	} else if (!addr_linked)
+		insert_range_byaddr(heap, freed);
+
+	insert_range_bysize(heap, freed);
+	mark_pages(heap, addr_to_pagenr(heap, page),
+		   size >> XNHEAP_PAGE_SHIFT, page_free);
+}
+
+static void add_page_front(struct xnheap *heap,
+			   int pg, int log2size)
+{
+	struct xnheap_pgentry *new, *head, *next;
+	int ilog;
+
+	/* Insert page at front of the per-bucket page list. */
+	
+	ilog = log2size - XNHEAP_MIN_LOG2;
+	new = &heap->pagemap[pg];
+	if (heap->buckets[ilog] == -1U) {
+		heap->buckets[ilog] = pg;
+		new->prev = new->next = pg;
+	} else {
+		head = &heap->pagemap[heap->buckets[ilog]];
+		new->prev = heap->buckets[ilog];
+		new->next = head->next;
+		next = &heap->pagemap[new->next];
+		next->prev = pg;
+		head->next = pg;
+		heap->buckets[ilog] = pg;
+	}
+}
+
+static void remove_page(struct xnheap *heap,
+			int pg, int log2size)
+{
+	struct xnheap_pgentry *old, *prev, *next;
+	int ilog = log2size - XNHEAP_MIN_LOG2;
+
+	/* Remove page from the per-bucket page list. */
+
+	old = &heap->pagemap[pg];
+	if (pg == old->next)
+		heap->buckets[ilog] = -1U;
+	else {
+		if (pg == heap->buckets[ilog])
+			heap->buckets[ilog] = old->next;
+		prev = &heap->pagemap[old->prev];
+		prev->next = old->next;
+		next = &heap->pagemap[old->next];
+		next->prev = old->prev;
+	}
+}
+
+static void move_page_front(struct xnheap *heap,
+			    int pg, int log2size)
+{
+	int ilog = log2size - XNHEAP_MIN_LOG2;
+
+	/* Move page at front of the per-bucket page list. */
+	
+	if (heap->buckets[ilog] == pg)
+		return;	 /* Already at front, no move. */
+		
+	remove_page(heap, pg, log2size);
+	add_page_front(heap, pg, log2size);
+}
+
+static void move_page_back(struct xnheap *heap,
+			   int pg, int log2size)
+{
+	struct xnheap_pgentry *old, *last, *head, *next;
+	int ilog;
+
+	/* Move page at end of the per-bucket page list. */
+	
+	old = &heap->pagemap[pg];
+	if (pg == old->next) /* Singleton, no move. */
+		return;
+		
+	remove_page(heap, pg, log2size);
+
+	ilog = log2size - XNHEAP_MIN_LOG2;
+	head = &heap->pagemap[heap->buckets[ilog]];
+	last = &heap->pagemap[head->prev];
+	old->prev = head->prev;
+	old->next = last->next;
+	next = &heap->pagemap[old->next];
+	next->prev = pg;
+	last->next = pg;
+}
+
+static void *add_free_range(struct xnheap *heap,
+			    size_t bsize, int log2size)
+{
+	int pg;
+
+	pg = reserve_page_range(heap, ALIGN(bsize, XNHEAP_PAGE_SIZE));
+	if (pg < 0)
+		return NULL;
+	
+	/*
+	 * Update the page entry.  If @log2size is non-zero
+	 * (i.e. bsize < XNHEAP_PAGE_SIZE), bsize is (1 << log2size)
+	 * between 2^XNHEAP_MIN_LOG2 and 2^(XNHEAP_PAGE_SHIFT - 1).
+	 * Save the log2 power into entry.type, then update the
+	 * per-page allocation bitmap to reserve the first block.
+	 *
+	 * Otherwise, we have a larger block which may span multiple
+	 * pages: set entry.type to page_list, indicating the start of
+	 * the page range, and entry.bsize to the overall block size.
+	 */
+	if (log2size) {
+		heap->pagemap[pg].type = log2size;
+		/*
+		 * Mark the first object slot (#0) as busy, along with
+		 * the leftmost bits we won't use for this log2 size.
+		 */
+		heap->pagemap[pg].map = ~gen_block_mask(log2size) | 1;
+		/*
+		 * Insert the new page at front of the per-bucket page
+		 * list, enforcing the assumption that pages with free
+		 * space live close to the head of this list.
+		 */
+		add_page_front(heap, pg, log2size);
+	} else {
+		heap->pagemap[pg].type = page_list;
+		heap->pagemap[pg].bsize = (u32)bsize;
+		mark_pages(heap, pg + 1,
+			   (bsize >> XNHEAP_PAGE_SHIFT) - 1, page_cont);
+	}
+
+	heap->used_size += bsize;
+
+	return pagenr_to_addr(heap, pg);
+}
+
+/**
+ * @fn void *xnheap_alloc(struct xnheap *heap, size_t size)
+ * @brief Allocate a memory block from a memory heap.
+ *
+ * Allocates a contiguous region of memory from an active memory heap.
+ * Such allocation is guaranteed to be time-bounded.
+ *
+ * @param heap The descriptor address of the heap to get memory from.
+ *
+ * @param size The size in bytes of the requested block.
+ *
+ * @return The address of the allocated region upon success, or NULL
+ * if no memory is available from the specified heap.
+ *
+ * @coretags{unrestricted}
+ */
+void *xnheap_alloc(struct xnheap *heap, size_t size)
+{
+	int log2size, ilog, pg, b = -1;
+	size_t bsize;
+	void *block;
+	spl_t s;
+
+	if (size == 0)
+		return NULL;
+
+	if (size < XNHEAP_MIN_ALIGN) {
+		bsize = size = XNHEAP_MIN_ALIGN;
+		log2size = XNHEAP_MIN_LOG2;
+	} else {
+		log2size = ilog2(size);
+		if (log2size < XNHEAP_PAGE_SHIFT) {
+			if (size & (size - 1))
+				log2size++;
+			bsize = 1 << log2size;
+		} else
+			bsize = ALIGN(size, XNHEAP_PAGE_SIZE);
+	}
+	
+	/*
+	 * Allocate entire pages directly from the pool whenever the
+	 * block is larger or equal to XNHEAP_PAGE_SIZE.  Otherwise,
+	 * use bucketed memory.
+	 *
+	 * NOTE: Fully busy pages from bucketed memory are moved back
+	 * at the end of the per-bucket page list, so that we may
+	 * always assume that either the heading page has some room
+	 * available, or no room is available from any page linked to
+	 * this list, in which case we should immediately add a fresh
+	 * page.
+	 */
+	xnlock_get_irqsave(&heap->lock, s);
+
+	if (bsize >= XNHEAP_PAGE_SIZE)
+		/* Add a range of contiguous free pages. */
+		block = add_free_range(heap, bsize, 0);
+	else {
+		ilog = log2size - XNHEAP_MIN_LOG2;
+		XENO_WARN_ON(MEMORY, ilog < 0 || ilog >= XNHEAP_MAX_BUCKETS);
+		pg = heap->buckets[ilog];
+		/*
+		 * Find a block in the heading page if any. If there
+		 * is none, there won't be any down the list: add a
+		 * new page right away.
+		 */
+		if (pg < 0 || heap->pagemap[pg].map == -1U)
+			block = add_free_range(heap, bsize, log2size);
+		else {
+			b = ffs(~heap->pagemap[pg].map) - 1;
+			/*
+			 * Got one block from the heading per-bucket
+			 * page, tag it as busy in the per-page
+			 * allocation map.
+			 */
+			heap->pagemap[pg].map |= (1U << b);
+			heap->used_size += bsize;
+			block = heap->membase +
+				(pg << XNHEAP_PAGE_SHIFT) +
+				(b << log2size);
+			if (heap->pagemap[pg].map == -1U)
+				move_page_back(heap, pg, log2size);
+		}
+	}
+
+	xnlock_put_irqrestore(&heap->lock, s);
+
+	return block;
+}
+EXPORT_SYMBOL_GPL(xnheap_alloc);
+
+/**
+ * @fn void xnheap_free(struct xnheap *heap, void *block)
+ * @brief Release a block to a memory heap.
+ *
+ * Releases a memory block to a heap.
+ *
+ * @param heap The heap descriptor.
+ *
+ * @param block The block to be returned to the heap.
+ *
+ * @coretags{unrestricted}
+ */
+void xnheap_free(struct xnheap *heap, void *block)
+{
+	unsigned long pgoff, boff;
+	int log2size, pg, n;
+	size_t bsize;
+	u32 oldmap;
+	spl_t s;
+
+	xnlock_get_irqsave(&heap->lock, s);
+
+	/* Compute the heading page number in the page map. */
+	pgoff = block - heap->membase;
+	pg = pgoff >> XNHEAP_PAGE_SHIFT;
+
+	if (!page_is_valid(heap, pg))
+		goto bad;
+	
+	switch (heap->pagemap[pg].type) {
+	case page_list:
+		bsize = heap->pagemap[pg].bsize;
+		XENO_WARN_ON(MEMORY, (bsize & (XNHEAP_PAGE_SIZE - 1)) != 0);
+		release_page_range(heap, pagenr_to_addr(heap, pg), bsize);
+		break;
+
+	default:
+		log2size = heap->pagemap[pg].type;
+		bsize = (1 << log2size);
+		XENO_WARN_ON(MEMORY, bsize >= XNHEAP_PAGE_SIZE);
+		boff = pgoff & ~XNHEAP_PAGE_MASK;
+		if ((boff & (bsize - 1)) != 0) /* Not at block start? */
+			goto bad;
+
+		n = boff >> log2size; /* Block position in page. */
+		oldmap = heap->pagemap[pg].map;
+		heap->pagemap[pg].map &= ~(1U << n);
+
+		/*
+		 * If the page the block was sitting on is fully idle,
+		 * return it to the pool. Otherwise, check whether
+		 * that page is transitioning from fully busy to
+		 * partially busy state, in which case it should move
+		 * toward the front of the per-bucket page list.
+		 */
+		if (heap->pagemap[pg].map == ~gen_block_mask(log2size)) {
+			remove_page(heap, pg, log2size);
+			release_page_range(heap, pagenr_to_addr(heap, pg),
+					   XNHEAP_PAGE_SIZE);
+		} else if (oldmap == -1U)
+			move_page_front(heap, pg, log2size);
+	}
+
+	heap->used_size -= bsize;
+
+	xnlock_put_irqrestore(&heap->lock, s);
+
+	return;
+bad:
+	xnlock_put_irqrestore(&heap->lock, s);
+
+	XENO_WARN(MEMORY, 1, "invalid block %p in heap %s",
+		  block, heap->name);
+}
+EXPORT_SYMBOL_GPL(xnheap_free);
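+
+/*
+ * Minimal usage sketch (illustrative only, 'struct foo' is a
+ * placeholder type): a caller pairing the two services above on the
+ * Cobalt system heap could look like this:
+ *
+ *	void *obj = xnheap_alloc(&cobalt_heap, sizeof(struct foo));
+ *	if (obj == NULL)
+ *		return -ENOMEM;
+ *	...
+ *	xnheap_free(&cobalt_heap, obj);
+ *
+ * Both services are time-bounded and tagged unrestricted, so they
+ * may be called from any Cobalt context.
+ */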
+
+ssize_t xnheap_check_block(struct xnheap *heap, void *block)
+{
+	unsigned long pg, pgoff, boff;
+	ssize_t ret = -EINVAL;
+	size_t bsize;
+	spl_t s;
+
+	xnlock_get_irqsave(&heap->lock, s);
+
+	/* Calculate the page number from the block address. */
+	pgoff = block - heap->membase;
+	pg = pgoff >> XNHEAP_PAGE_SHIFT;
+	if (page_is_valid(heap, pg)) {
+		if (heap->pagemap[pg].type == page_list)
+			bsize = heap->pagemap[pg].bsize;
+		else {
+			bsize = (1 << heap->pagemap[pg].type);
+			boff = pgoff & ~XNHEAP_PAGE_MASK;
+			if ((boff & (bsize - 1)) != 0) /* Not at block start? */
+				goto out;
+		}
+		ret = (ssize_t)bsize;
+	}
+out:
+	xnlock_put_irqrestore(&heap->lock, s);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(xnheap_check_block);
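+
+/*
+ * Illustrative sketch: xnheap_check_block() can be used to validate
+ * a block address received from an untrusted source before releasing
+ * it, e.g.:
+ *
+ *	if (xnheap_check_block(&cobalt_heap, block) < 0)
+ *		return -EINVAL;
+ *	xnheap_free(&cobalt_heap, block);
+ */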
+
+/**
+ * @fn xnheap_init(struct xnheap *heap, void *membase, u32 size)
+ * @brief Initialize a memory heap.
+ *
+ * Initializes a memory heap suitable for time-bounded allocation
+ * requests of dynamic memory.
+ *
+ * @param heap The address of a heap descriptor to initialize.
+ *
+ * @param membase The address of the storage area.
+ *
+ * @param size The size in bytes of the storage area.  @a size must be
+ * a multiple of XNHEAP_PAGE_SIZE and smaller than (4Gb - PAGE_SIZE)
+ * in the current implementation.
+ *
+ * @return 0 is returned upon success, or:
+ *
+ * - -EINVAL is returned if @a size is either greater than
+ *   XNHEAP_MAX_HEAPSZ, or not aligned on PAGE_SIZE.
+ *
+ * - -ENOMEM is returned upon failure of allocating the meta-data area
+ * used internally to maintain the heap.
+ *
+ * @coretags{secondary-only}
+ */
+int xnheap_init(struct xnheap *heap, void *membase, size_t size)
+{
+	int n, nrpages;
+	spl_t s;
+
+	secondary_mode_only();
+
+	if (size > XNHEAP_MAX_HEAPSZ || !PAGE_ALIGNED(size))
+		return -EINVAL;
+
+	/* Reset bucket page lists, all empty. */
+	for (n = 0; n < XNHEAP_MAX_BUCKETS; n++)
+		heap->buckets[n] = -1U;
+
+	xnlock_init(&heap->lock);
+
+	nrpages = size >> XNHEAP_PAGE_SHIFT;
+	heap->pagemap = vzalloc(sizeof(struct xnheap_pgentry) * nrpages);
+	if (heap->pagemap == NULL)
+		return -ENOMEM;
+
+	heap->membase = membase;
+	heap->usable_size = size;
+	heap->used_size = 0;
+		      
+	/*
+	 * The free page pool is maintained as a set of ranges of
+	 * contiguous pages indexed by address and size in rbtrees.
+	 * Initially, we have a single range in those trees covering
+	 * the whole memory we have been given for the heap. Over
+	 * time, that range will be split then possibly re-merged back
+	 * as allocations and deallocations take place.
+	 */
+	heap->size_tree = RB_ROOT;
+	heap->addr_tree = RB_ROOT;
+	release_page_range(heap, membase, size);
+
+	/* Default name, override with xnheap_set_name() */
+	ksformat(heap->name, sizeof(heap->name), "(%p)", heap);
+
+	xnlock_get_irqsave(&nklock, s);
+	list_add_tail(&heap->next, &heapq);
+	nrheaps++;
+	xnvfile_touch_tag(&vfile_tag);
+	xnlock_put_irqrestore(&nklock, s);
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(xnheap_init);
+
+/**
+ * @fn void xnheap_destroy(struct xnheap *heap)
+ * @brief Destroys a memory heap.
+ *
+ * Destroys a memory heap.
+ *
+ * @param heap The heap descriptor.
+ *
+ * @coretags{secondary-only}
+ */
+void xnheap_destroy(struct xnheap *heap)
+{
+	spl_t s;
+
+	secondary_mode_only();
+
+	xnlock_get_irqsave(&nklock, s);
+	list_del(&heap->next);
+	nrheaps--;
+	xnvfile_touch_tag(&vfile_tag);
+	xnlock_put_irqrestore(&nklock, s);
+	vfree(heap->pagemap);
+}
+EXPORT_SYMBOL_GPL(xnheap_destroy);
+
+/**
+ * @fn xnheap_set_name(struct xnheap *heap,const char *name,...)
+ * @brief Set the heap's name string.
+ *
+ * Set the heap name that will be used in statistic outputs.
+ *
+ * @param heap The address of a heap descriptor.
+ *
+ * @param name Name displayed in statistic outputs. This parameter can
+ * be a printk()-like format argument list.
+ *
+ * @coretags{task-unrestricted}
+ */
+void xnheap_set_name(struct xnheap *heap, const char *name, ...)
+{
+	va_list args;
+
+	va_start(args, name);
+	kvsformat(heap->name, sizeof(heap->name), name, args);
+	va_end(args);
+}
+EXPORT_SYMBOL_GPL(xnheap_set_name);
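+
+/*
+ * Lifecycle sketch (illustrative only; 'myheap', 'mem', 'ret' and the
+ * 64k size are placeholder values): a private heap is typically set
+ * up and torn down by chaining the services above with the
+ * xnheap_vmalloc()/xnheap_vfree() helpers defined below.
+ *
+ *	mem = xnheap_vmalloc(64 * 1024);
+ *	if (mem == NULL)
+ *		return -ENOMEM;
+ *	ret = xnheap_init(&myheap, mem, 64 * 1024);
+ *	if (ret) {
+ *		xnheap_vfree(mem);
+ *		return ret;
+ *	}
+ *	xnheap_set_name(&myheap, "example-heap");
+ *	...
+ *	xnheap_destroy(&myheap);
+ *	xnheap_vfree(mem);
+ *
+ * xnheap_init() and xnheap_destroy() are secondary-mode only, so this
+ * sequence belongs in module init/cleanup paths rather than in
+ * real-time context.
+ */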
+
+void *xnheap_vmalloc(size_t size)
+{
+	/*
+	 * We want memory used in real-time context to be pulled from
+	 * ZONE_NORMAL, however we don't need it to be physically
+	 * contiguous.
+	 *
+	 * 32bit systems which would need HIGHMEM for running a Cobalt
+	 * configuration would also be required to support PTE
+	 * pinning, which not all architectures provide.  Moreover,
+	 * pinning PTEs eagerly for a potentially (very) large amount
+	 * of memory may quickly degrade performance.
+	 *
+	 * If using a different kernel/user memory split cannot be the
+	 * answer for those configs, then basing such software on a
+	 * 32bit system was likely the wrong choice in the first place
+	 * anyway.
+	 */
+	return vmalloc_kernel(size, 0);
+}
+EXPORT_SYMBOL_GPL(xnheap_vmalloc);
+
+void xnheap_vfree(void *p)
+{
+	vfree(p);
+}
+EXPORT_SYMBOL_GPL(xnheap_vfree);
+
+/** @} */
+++ linux-patched/kernel/xenomai/sched-idle.c	2022-03-21 12:58:28.840894315 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/kernel/xenomai/map.c	1970-01-01 01:00:00.000000000 +0100
+/*
+ * Copyright (C) 2008 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * Xenomai is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published
+ * by the Free Software Foundation; either version 2 of the License,
+ * or (at your option) any later version.
+ *
+ * Xenomai is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Xenomai; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA
+ * 02111-1307, USA.
+ */
+#include <cobalt/kernel/sched.h>
+
+static struct xnthread *xnsched_idle_pick(struct xnsched *sched)
+{
+	return &sched->rootcb;
+}
+
+static bool xnsched_idle_setparam(struct xnthread *thread,
+				  const union xnsched_policy_param *p)
+{
+	return __xnsched_idle_setparam(thread, p);
+}
+
+static void xnsched_idle_getparam(struct xnthread *thread,
+				  union xnsched_policy_param *p)
+{
+	__xnsched_idle_getparam(thread, p);
+}
+
+static void xnsched_idle_trackprio(struct xnthread *thread,
+				   const union xnsched_policy_param *p)
+{
+	__xnsched_idle_trackprio(thread, p);
+}
+
+static void xnsched_idle_protectprio(struct xnthread *thread, int prio)
+{
+	__xnsched_idle_protectprio(thread, prio);
+}
+
+struct xnsched_class xnsched_class_idle = {
+	.sched_init		=	NULL,
+	.sched_enqueue		=	NULL,
+	.sched_dequeue		=	NULL,
+	.sched_requeue		=	NULL,
+	.sched_tick		=	NULL,
+	.sched_rotate		=	NULL,
+	.sched_forget		=	NULL,
+	.sched_kick		=	NULL,
+	.sched_declare		=	NULL,
+	.sched_pick		=	xnsched_idle_pick,
+	.sched_setparam		=	xnsched_idle_setparam,
+	.sched_getparam		=	xnsched_idle_getparam,
+	.sched_trackprio	=	xnsched_idle_trackprio,
+	.sched_protectprio	=	xnsched_idle_protectprio,
+	.weight			=	XNSCHED_CLASS_WEIGHT(0),
+	.policy			=	SCHED_IDLE,
+	.name			=	"idle"
+};
+++ linux-patched/kernel/xenomai/map.c	2022-03-21 12:58:28.836894354 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/kernel/xenomai/thread.c	1970-01-01 01:00:00.000000000 +0100
+/*
+ * Copyright (C) 2007 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * Xenomai is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * Xenomai is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Xenomai; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA
+ * 02111-1307, USA.
+ */
+#include <linux/module.h>
+#include <cobalt/kernel/heap.h>
+#include <cobalt/kernel/map.h>
+#include <asm/xenomai/machine.h>
+
+/**
+ * @ingroup cobalt_core
+ * @defgroup cobalt_core_map Lightweight key-to-object mapping service
+ *
+ * A map is a simple indexing structure which associates unique
+ * integer keys with pointers to objects.  The current implementation
+ * supports reservation, for naming/indexing objects, either on a
+ * fixed, user-provided integer (i.e. a reserved key value), or by
+ * drawing the next available key internally if the caller did not
+ * specify any fixed key. For instance, in some given map, the key
+ * space ranging from 0 to 255 could be reserved for fixed keys,
+ * whilst the range from 256 to 511 could be available for drawing
+ * free keys dynamically.
+ *
+ * A maximum of 1024 unique keys per map is supported on 32bit
+ * machines.
+ *
+ * (This implementation should not be confused with C++ STL maps,
+ * which are dynamically expandable and allow arbitrary key types;
+ * Xenomai maps don't).
+ *
+ * @{
+ */
+
+/**
+ * @fn void xnmap_create(int nkeys, int reserve, int offset)
+ * @brief Create a map.
+ *
+ * Allocates a new map with the specified addressing capabilities. The
+ * memory is obtained from the Xenomai system heap.
+ *
+ * @param nkeys The maximum number of unique keys the map will be able
+ * to hold. This value cannot exceed the static limit represented by
+ * XNMAP_MAX_KEYS, and must be a power of two.
+ *
+ * @param reserve The number of keys which should be kept for
+ * reservation within the index space. Reserving a key means to
+ * specify a valid key to the xnmap_enter() service, which will then
+ * attempt to register this exact key, instead of drawing the next
+ * available key from the unreserved index space. When reservation is
+ * in effect, the unreserved index space will hold key values greater
+ * than @a reserve, keeping the low key values for the reserved space.
+ * For instance, passing @a reserve = 32 would cause the index range [
+ * 0 .. 31 ] to be kept for reserved keys.  When non-zero, @a reserve
+ * is rounded to the next multiple of BITS_PER_LONG. If @a reserve is
+ * zero no reservation will be available from the map.
+ *
+ * @param offset The lowest key value xnmap_enter() will return to the
+ * caller. Key values will be in the range [ 0 + offset .. @a nkeys +
+ * offset - 1 ]. Negative offsets are valid.
+ *
+ * @return the address of the new map is returned on success;
+ * otherwise, NULL is returned if @a nkeys is invalid.
+ *
+ * @coretags{task-unrestricted}
+ */
+struct xnmap *xnmap_create(int nkeys, int reserve, int offset)
+{
+	struct xnmap *map;
+	int mapsize;
+
+	if (nkeys <= 0 || (nkeys & (nkeys - 1)) != 0)
+		return NULL;
+
+	mapsize = sizeof(*map) + (nkeys - 1) * sizeof(map->objarray[0]);
+	map = xnmalloc(mapsize);
+
+	if (!map)
+		return NULL;
+
+	map->ukeys = 0;
+	map->nkeys = nkeys;
+	map->offset = offset;
+	map->himask = (1 << ((reserve + BITS_PER_LONG - 1) / BITS_PER_LONG)) - 1;
+	map->himap = ~0;
+	memset(map->lomap, ~0, sizeof(map->lomap));
+	memset(map->objarray, 0, sizeof(map->objarray[0]) * nkeys);
+
+	return map;
+}
+EXPORT_SYMBOL_GPL(xnmap_create);
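+
+/*
+ * Worked example (illustrative): xnmap_create(512, 32, 0) builds a
+ * map of 512 keys. Given the BITS_PER_LONG rounding noted above,
+ * keys 0..31 (0..63 on a 64bit machine) are kept for fixed-key
+ * reservation through xnmap_enter(), while the remaining keys are
+ * drawn dynamically. With @offset == 0, the key values returned by
+ * xnmap_enter() range over 0..511.
+ */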
+
+/**
+ * @fn void xnmap_delete(struct xnmap *map)
+ * @brief Delete a map.
+ *
+ * Deletes a map, freeing any associated memory back to the Xenomai
+ * system heap.
+ *
+ * @param map The address of the map to delete.
+ *
+ * @coretags{task-unrestricted}
+ */
+void xnmap_delete(struct xnmap *map)
+{
+	xnfree(map);
+}
+EXPORT_SYMBOL_GPL(xnmap_delete);
+
+/**
+ * @fn void xnmap_enter(struct xnmap *map, int key, void *objaddr)
+ * @brief Index an object into a map.
+ *
+ * Insert a new object into the given map.
+ *
+ * @param map The address of the map to insert into.
+ *
+ * @param key The key to index the object on. If this key is within
+ * the valid index range [ 0 - offset .. nkeys - offset - 1 ], then an
+ * attempt to reserve this exact key is made. If @a key has an
+ * out-of-range value lower than or equal to 0 - offset - 1, then an
+ * attempt is made to draw a free key from the unreserved index space.
+ *
+ * @param objaddr The address of the object to index on the key. This
+ * value will be returned by a successful call to xnmap_fetch() with
+ * the same key.
+ *
+ * @return a valid key is returned on success, either @a key if
+ * reserved, or the next free key. Otherwise:
+ *
+ * - -EEXIST is returned upon attempt to reserve a busy key.
+ *
+ * - -ENOSPC is returned when no more free keys are available.
+ *
+ * @coretags{unrestricted}
+ */
+int xnmap_enter(struct xnmap *map, int key, void *objaddr)
+{
+	int hi, lo, ofkey = key - map->offset;
+	spl_t s;
+
+	xnlock_get_irqsave(&nklock, s);
+
+	if (ofkey >= 0 && ofkey < map->nkeys) {
+		if (map->objarray[ofkey] != NULL) {
+			key = -EEXIST;
+			goto unlock_and_exit;
+		}
+	} else if (map->ukeys >= map->nkeys) {
+		key = -ENOSPC;
+		goto unlock_and_exit;
+	} else {
+		/*
+		 * The himask implements a namespace reservation of
+		 * half of the bitmap space which cannot be used to
+		 * draw keys.
+		 */
+
+		hi = ffnz(map->himap & ~map->himask);
+		lo = ffnz(map->lomap[hi]);
+		ofkey = hi * BITS_PER_LONG + lo;
+		++map->ukeys;
+
+		map->lomap[hi] &= ~(1UL << lo);
+		if (map->lomap[hi] == 0)
+			map->himap &= ~(1UL << hi);
+	}
+
+	map->objarray[ofkey] = objaddr;
+
+      unlock_and_exit:
+
+	xnlock_put_irqrestore(&nklock, s);
+
+	return ofkey + map->offset;
+}
+EXPORT_SYMBOL_GPL(xnmap_enter);
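+
+/*
+ * Illustrative sketch (reusing the map from the example above, i.e.
+ * @offset == 0): xnmap_enter(map, 10, obj) attempts to reserve key 10
+ * exactly and fails with -EEXIST if that slot is busy, while
+ * xnmap_enter(map, -1, obj) falls below the valid index range and
+ * therefore draws the next free key from the unreserved space.
+ */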
+
+/**
+ * @fn void xnmap_remove(struct xnmap *map, int key)
+ * @brief Remove an object reference from a map.
+ *
+ * Removes an object reference from the given map, releasing the
+ * associated key.
+ *
+ * @param map The address of the map to remove from.
+ *
+ * @param key The key the object reference to be removed is indexed
+ * on.
+ *
+ * @return 0 is returned on success. Otherwise:
+ *
+ * - -ESRCH is returned if @a key is invalid.
+ *
+ * @coretags{unrestricted}
+ */
+int xnmap_remove(struct xnmap *map, int key)
+{
+	int ofkey = key - map->offset, hi, lo;
+	spl_t s;
+
+	if (ofkey < 0 || ofkey >= map->nkeys)
+		return -ESRCH;
+
+	hi = ofkey / BITS_PER_LONG;
+	lo = ofkey % BITS_PER_LONG;
+	xnlock_get_irqsave(&nklock, s);
+	map->objarray[ofkey] = NULL;
+	map->himap |= (1UL << hi);
+	map->lomap[hi] |= (1UL << lo);
+	--map->ukeys;
+	xnlock_put_irqrestore(&nklock, s);
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(xnmap_remove);
+
+/**
+ * @fn void xnmap_fetch(struct xnmap *map, int key)
+ * @brief Search for an object in a map.
+ *
+ * Retrieve an object reference from the given map by its index key.
+ *
+ * @param map The address of the map to retrieve from.
+ *
+ * @param key The key to be searched for in the map index.
+ *
+ * @return The indexed object address is returned on success,
+ * otherwise NULL is returned when @a key is invalid or no object is
+ * currently indexed on it.
+ *
+ * @coretags{unrestricted}
+ */
+
+/**
+ * @fn void xnmap_fetch_nocheck(struct xnmap *map, int key)
+ * @brief Search for an object in a map - unchecked form.
+ *
+ * Retrieve an object reference from the given map by its index key,
+ * without performing any sanity check on the provided key.
+ *
+ * @param map The address of the map to retrieve from.
+ *
+ * @param key The key to be searched for in the map index.
+ *
+ * @return The indexed object address is returned on success,
+ * otherwise NULL is returned when no object is currently indexed on
+ * @a key.
+ *
+ * @coretags{unrestricted}
+ */
+
+/** @} */
+++ linux-patched/kernel/xenomai/thread.c	2022-03-21 12:58:28.832894393 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/kernel/xenomai/Makefile	1970-01-01 01:00:00.000000000 +0100
+/*
+ * Copyright (C) 2001-2013 Philippe Gerum <rpm@xenomai.org>.
+ * Copyright (C) 2006-2010 Gilles Chanteperdrix <gilles.chanteperdrix@xenomai.org>
+ * Copyright (C) 2001-2013 The Xenomai project <http://www.xenomai.org>
+ *
+ * SMP support Copyright (C) 2004 The HYADES project <http://www.hyades-itea.org>
+ *
+ * Xenomai is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published
+ * by the Free Software Foundation; either version 2 of the License,
+ * or (at your option) any later version.
+ *
+ * Xenomai is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Xenomai; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA
+ * 02111-1307, USA.
+ */
+#include <linux/kthread.h>
+#include <linux/wait.h>
+#include <linux/signal.h>
+#include <linux/pid.h>
+#include <linux/sched.h>
+#include <cobalt/kernel/sched.h>
+#include <cobalt/kernel/timer.h>
+#include <cobalt/kernel/synch.h>
+#include <cobalt/kernel/heap.h>
+#include <cobalt/kernel/intr.h>
+#include <cobalt/kernel/registry.h>
+#include <cobalt/kernel/clock.h>
+#include <cobalt/kernel/stat.h>
+#include <cobalt/kernel/trace.h>
+#include <cobalt/kernel/assert.h>
+#include <cobalt/kernel/select.h>
+#include <cobalt/kernel/lock.h>
+#include <cobalt/kernel/thread.h>
+#include <pipeline/kevents.h>
+#include <pipeline/inband_work.h>
+#include <pipeline/sched.h>
+#include <trace/events/cobalt-core.h>
+#include "debug.h"
+
+static DECLARE_WAIT_QUEUE_HEAD(join_all);
+
+/**
+ * @ingroup cobalt_core
+ * @defgroup cobalt_core_thread Thread services
+ * @{
+ */
+
+static void timeout_handler(struct xntimer *timer)
+{
+	struct xnthread *thread = container_of(timer, struct xnthread, rtimer);
+
+	xnthread_set_info(thread, XNTIMEO);	/* Interrupts are off. */
+	xnthread_resume(thread, XNDELAY);
+}
+
+static void periodic_handler(struct xntimer *timer)
+{
+	struct xnthread *thread = container_of(timer, struct xnthread, ptimer);
+	/*
+	 * Prevent unwanted round-robin, and do not wake up threads
+	 * blocked on a resource.
+	 */
+	if (xnthread_test_state(thread, XNDELAY|XNPEND) == XNDELAY)
+		xnthread_resume(thread, XNDELAY);
+
+	/*
+	 * The periodic thread might have migrated to another CPU
+	 * while passive, fix the timer affinity if need be.
+	 */
+	xntimer_set_affinity(&thread->ptimer, thread->sched);
+}
+
+static inline void enlist_new_thread(struct xnthread *thread)
+{				/* nklock held, irqs off */
+	list_add_tail(&thread->glink, &nkthreadq);
+	cobalt_nrthreads++;
+	xnvfile_touch_tag(&nkthreadlist_tag);
+}
+
+struct kthread_arg {
+	struct pipeline_inband_work inband_work; /* Must be first. */
+	struct xnthread *thread;
+	struct completion *done;
+};
+
+static void do_parent_wakeup(struct pipeline_inband_work *inband_work)
+{
+	struct kthread_arg *ka;
+
+	ka = container_of(inband_work, struct kthread_arg, inband_work);
+	complete(ka->done);
+}
+
+static inline void init_kthread_info(struct xnthread *thread)
+{
+	struct cobalt_threadinfo *p;
+
+	p = pipeline_current();
+	p->thread = thread;
+	p->process = NULL;
+}
+
+static int map_kthread(struct xnthread *thread, struct kthread_arg *ka)
+{
+	int ret;
+	spl_t s;
+
+	if (xnthread_test_state(thread, XNUSER))
+		return -EINVAL;
+
+	if (xnthread_current() || xnthread_test_state(thread, XNMAPPED))
+		return -EBUSY;
+
+	thread->u_window = NULL;
+	xnthread_pin_initial(thread);
+
+	pipeline_init_shadow_tcb(thread);
+	xnthread_suspend(thread, XNRELAX, XN_INFINITE, XN_RELATIVE, NULL);
+	init_kthread_info(thread);
+	xnthread_set_state(thread, XNMAPPED);
+	xndebug_shadow_init(thread);
+	xnthread_run_handler(thread, map_thread);
+	pipeline_enable_kevents();
+
+	/*
+	 * CAUTION: Soon after xnthread_init() has returned,
+	 * xnthread_start() is commonly invoked from the root domain,
+	 * therefore the call site may expect the started kernel
+	 * shadow to preempt immediately. As a result of such
+	 * assumption, start attributes (struct xnthread_start_attr)
+	 * are often laid on the caller's stack.
+	 *
+	 * For this reason, we raise the completion signal to wake up
+	 * the xnthread_init() caller only once the emerging thread is
+	 * hardened, and __never__ before that point. Since we run
+	 * over the Xenomai domain upon return from xnthread_harden(),
+	 * we schedule a virtual interrupt handler in the root domain
+	 * to signal the completion object.
+	 */
+	xnthread_resume(thread, XNDORMANT);
+	ret = xnthread_harden();
+
+	trace_cobalt_lostage_request("wakeup", current);
+
+	ka->inband_work = (struct pipeline_inband_work)
+		PIPELINE_INBAND_WORK_INITIALIZER(*ka, do_parent_wakeup);
+	pipeline_post_inband_work(ka);
+
+	xnlock_get_irqsave(&nklock, s);
+
+	enlist_new_thread(thread);
+	/*
+	 * Make sure xnthread_start() did not slip in from another CPU
+	 * while we were waking up the parent.
+	 */
+	if (thread->entry == NULL)
+		xnthread_suspend(thread, XNDORMANT,
+				 XN_INFINITE, XN_RELATIVE, NULL);
+
+	xnlock_put_irqrestore(&nklock, s);
+
+	xnthread_test_cancel();
+
+	xntrace_pid(xnthread_host_pid(thread),
+		    xnthread_current_priority(thread));
+
+	return ret;
+}
+
+static int kthread_trampoline(void *arg)
+{
+	struct kthread_arg *ka = arg;
+	struct xnthread *thread = ka->thread;
+	struct sched_param param;
+	int ret, policy, prio;
+
+	/*
+	 * It only makes sense to create Xenomai kthreads with the
+	 * SCHED_FIFO, SCHED_NORMAL or SCHED_WEAK policies. So
+	 * anything that is not from Xenomai's RT class is assumed to
+	 * belong to SCHED_NORMAL linux-wise.
+	 */
+	if (thread->sched_class != &xnsched_class_rt) {
+		policy = SCHED_NORMAL;
+		prio = 0;
+	} else {
+		policy = SCHED_FIFO;
+		prio = normalize_priority(thread->cprio);
+	}
+
+	param.sched_priority = prio;
+	sched_setscheduler(current, policy, &param);
+
+	ret = map_kthread(thread, ka);
+	if (ret) {
+		printk(XENO_WARNING "failed to create kernel shadow %s\n",
+		       thread->name);
+		return ret;
+	}
+
+	trace_cobalt_shadow_entry(thread);
+
+	thread->entry(thread->cookie);
+
+	xnthread_cancel(thread);
+
+	return 0;
+}
+
+static inline int spawn_kthread(struct xnthread *thread)
+{
+	DECLARE_COMPLETION_ONSTACK(done);
+	struct kthread_arg ka = {
+		.thread = thread,
+		.done = &done
+	};
+	struct task_struct *p;
+
+	p = kthread_run(kthread_trampoline, &ka, "%s", thread->name);
+	if (IS_ERR(p))
+		return PTR_ERR(p);
+
+	wait_for_completion(&done);
+
+	return 0;
+}
+
+int __xnthread_init(struct xnthread *thread,
+		    const struct xnthread_init_attr *attr,
+		    struct xnsched *sched,
+		    struct xnsched_class *sched_class,
+		    const union xnsched_policy_param *sched_param)
+{
+	int flags = attr->flags, ret, gravity;
+
+	flags &= ~(XNSUSP|XNBOOST);
+#ifndef CONFIG_XENO_ARCH_FPU
+	flags &= ~XNFPU;
+#endif
+	if ((flags & XNROOT) == 0)
+		flags |= XNDORMANT;
+
+	if (attr->name)
+		ksformat(thread->name,
+			 sizeof(thread->name), "%s", attr->name);
+	else
+		ksformat(thread->name,
+			 sizeof(thread->name), "@%p", thread);
+
+	/*
+	 * We mirror the global user debug state into the per-thread
+	 * state, to speed up branch taking in lib/cobalt wherever
+	 * this needs to be tested.
+	 */
+	if (IS_ENABLED(CONFIG_XENO_OPT_DEBUG_MUTEX_SLEEP))
+		flags |= XNDEBUG;
+
+	thread->personality = attr->personality;
+	cpumask_and(&thread->affinity, &attr->affinity, &cobalt_cpu_affinity);
+	thread->sched = sched;
+	thread->state = flags;
+	thread->info = 0;
+	thread->local_info = 0;
+	thread->wprio = XNSCHED_IDLE_PRIO;
+	thread->cprio = XNSCHED_IDLE_PRIO;
+	thread->bprio = XNSCHED_IDLE_PRIO;
+	thread->lock_count = 0;
+	thread->rrperiod = XN_INFINITE;
+	thread->wchan = NULL;
+	thread->wwake = NULL;
+	thread->wcontext = NULL;
+	thread->res_count = 0;
+	thread->handle = XN_NO_HANDLE;
+	memset(&thread->stat, 0, sizeof(thread->stat));
+	thread->selector = NULL;
+	INIT_LIST_HEAD(&thread->glink);
+	INIT_LIST_HEAD(&thread->boosters);
+	/* These will be filled by xnthread_start() */
+	thread->entry = NULL;
+	thread->cookie = NULL;
+	init_completion(&thread->exited);
+	memset(xnthread_archtcb(thread), 0, sizeof(struct xnarchtcb));
+	memset(thread->sigarray, 0, sizeof(thread->sigarray));
+
+	gravity = flags & XNUSER ? XNTIMER_UGRAVITY : XNTIMER_KGRAVITY;
+	xntimer_init(&thread->rtimer, &nkclock, timeout_handler,
+		     sched, gravity);
+	xntimer_set_name(&thread->rtimer, thread->name);
+	xntimer_set_priority(&thread->rtimer, XNTIMER_HIPRIO);
+	xntimer_init(&thread->ptimer, &nkclock, periodic_handler,
+		     sched, gravity);
+	xntimer_set_name(&thread->ptimer, thread->name);
+	xntimer_set_priority(&thread->ptimer, XNTIMER_HIPRIO);
+
+	thread->base_class = NULL; /* xnsched_set_policy() will set it. */
+	ret = xnsched_init_thread(thread);
+	if (ret)
+		goto err_out;
+
+	ret = xnsched_set_policy(thread, sched_class, sched_param);
+	if (ret)
+		goto err_out;
+
+	if ((flags & (XNUSER|XNROOT)) == 0) {
+		ret = spawn_kthread(thread);
+		if (ret)
+			goto err_out;
+	}
+
+	return 0;
+
+err_out:
+	xntimer_destroy(&thread->rtimer);
+	xntimer_destroy(&thread->ptimer);
+
+	return ret;
+}
+
+void xnthread_deregister(struct xnthread *thread)
+{
+	if (thread->handle != XN_NO_HANDLE)
+		xnregistry_remove(thread->handle);
+
+	thread->handle = XN_NO_HANDLE;
+}
+
+char *xnthread_format_status(unsigned long status, char *buf, int size)
+{
+	static const char labels[] = XNTHREAD_STATE_LABELS;
+	int pos, c, mask;
+	char *wp;
+
+	for (mask = (int)status, pos = 0, wp = buf;
+	     mask != 0 && wp - buf < size - 2;	/* 1-letter label + \0 */
+	     mask >>= 1, pos++) {
+		if ((mask & 1) == 0)
+			continue;
+
+		c = labels[pos];
+
+		switch (1 << pos) {
+		case XNROOT:
+			c = 'R'; /* Always mark root as runnable. */
+			break;
+		case XNREADY:
+			if (status & XNROOT)
+				continue; /* Already reported on XNROOT. */
+			break;
+		case XNDELAY:
+			/*
+			 * Only report genuine delays here, not timed
+			 * waits for resources.
+			 */
+			if (status & XNPEND)
+				continue;
+			break;
+		case XNPEND:
+			/* Report timed waits with lowercase symbol. */
+			if (status & XNDELAY)
+				c |= 0x20;
+			break;
+		default:
+			if (c == '.')
+				continue;
+		}
+		*wp++ = c;
+	}
+
+	*wp = '\0';
+
+	return buf;
+}
+
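+/*
+ * A minimal usage sketch for xnthread_format_status(); the buffer
+ * size below is an arbitrary choice:
+ *
+ *	char sbuf[16];
+ *
+ *	printk("%s state: %s\n", thread->name,
+ *	       xnthread_format_status(thread->state, sbuf, sizeof(sbuf)));
+ */
+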
+pid_t xnthread_host_pid(struct xnthread *thread)
+{
+	if (xnthread_test_state(thread, XNROOT))
+		return 0;
+	if (!xnthread_host_task(thread))
+		return -1;
+
+	return task_pid_nr(xnthread_host_task(thread));
+}
+
+int xnthread_set_clock(struct xnthread *thread, struct xnclock *newclock)
+{
+	spl_t s;
+
+	if (thread == NULL) {
+		thread = xnthread_current();
+		if (thread == NULL)
+			return -EPERM;
+	}
+
+	/* Change the clock the thread's periodic timer is paced by. */
+	xnlock_get_irqsave(&nklock, s);
+	xntimer_set_clock(&thread->ptimer, newclock);
+	xnlock_put_irqrestore(&nklock, s);
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(xnthread_set_clock);
+
+xnticks_t xnthread_get_timeout(struct xnthread *thread, xnticks_t ns)
+{
+	struct xntimer *timer;
+	xnticks_t timeout;
+
+	if (!xnthread_test_state(thread,XNDELAY))
+		return 0LL;
+
+	if (xntimer_running_p(&thread->rtimer))
+		timer = &thread->rtimer;
+	else if (xntimer_running_p(&thread->ptimer))
+		timer = &thread->ptimer;
+	else
+		return 0LL;
+
+	timeout = xntimer_get_date(timer);
+	if (timeout <= ns)
+		return 1;
+
+	return timeout - ns;
+}
+EXPORT_SYMBOL_GPL(xnthread_get_timeout);
+
+xnticks_t xnthread_get_period(struct xnthread *thread)
+{
+	xnticks_t period = 0;
+	/*
+	 * The current thread period might be:
+	 * - the value of the timer interval for periodic threads (ns/ticks)
+	 * - or, the value of the allotted round-robin quantum (ticks)
+	 * - or zero, meaning "no periodic activity".
+	 */
+	if (xntimer_running_p(&thread->ptimer))
+		period = xntimer_interval(&thread->ptimer);
+	else if (xnthread_test_state(thread,XNRRB))
+		period = thread->rrperiod;
+
+	return period;
+}
+EXPORT_SYMBOL_GPL(xnthread_get_period);
+
+void xnthread_prepare_wait(struct xnthread_wait_context *wc)
+{
+	struct xnthread *curr = xnthread_current();
+
+	wc->posted = 0;
+	curr->wcontext = wc;
+}
+EXPORT_SYMBOL_GPL(xnthread_prepare_wait);
+
+static inline void release_all_ownerships(struct xnthread *curr)
+{
+	struct xnsynch *synch, *tmp;
+
+	/*
+	 * Release all the ownerships obtained by a thread on
+	 * synchronization objects. This routine must be entered
+	 * interrupts off.
+	 */
+	xnthread_for_each_booster_safe(synch, tmp, curr) {
+		xnsynch_release(synch, curr);
+		if (synch->cleanup)
+			synch->cleanup(synch);
+	}
+}
+
+static inline void cleanup_tcb(struct xnthread *curr) /* nklock held, irqs off */
+{
+	list_del(&curr->glink);
+	cobalt_nrthreads--;
+	xnvfile_touch_tag(&nkthreadlist_tag);
+
+	if (xnthread_test_state(curr, XNREADY)) {
+		XENO_BUG_ON(COBALT, xnthread_test_state(curr, XNTHREAD_BLOCK_BITS));
+		xnsched_dequeue(curr);
+		xnthread_clear_state(curr, XNREADY);
+	}
+
+	if (xnthread_test_state(curr, XNPEND))
+		xnsynch_forget_sleeper(curr);
+
+	xnthread_set_state(curr, XNZOMBIE);
+	/*
+	 * NOTE: we must be running over the root thread, or @curr
+	 * is dormant, which means that we don't risk sched->curr
+	 * disappearing due to voluntary rescheduling while holding
+	 * the nklock, even though @curr bears the zombie bit.
+	 */
+	release_all_ownerships(curr);
+
+	pipeline_finalize_thread(curr);
+	xnsched_forget(curr);
+	xnthread_deregister(curr);
+}
+
+void __xnthread_cleanup(struct xnthread *curr)
+{
+	spl_t s;
+
+	secondary_mode_only();
+
+	xntimer_destroy(&curr->rtimer);
+	xntimer_destroy(&curr->ptimer);
+
+	if (curr->selector) {
+		xnselector_destroy(curr->selector);
+		curr->selector = NULL;
+	}
+
+	xnlock_get_irqsave(&nklock, s);
+	cleanup_tcb(curr);
+	xnlock_put_irqrestore(&nklock, s);
+
+	/* Wake up the joiner if any (we can't have more than one). */
+	complete(&curr->exited);
+
+	/* Notify our exit to xnthread_killall() if need be. */
+	if (waitqueue_active(&join_all))
+		wake_up(&join_all);
+
+	/* Finalize last since this incurs releasing the TCB. */
+	xnthread_run_handler_stack(curr, finalize_thread);
+}
+
+/*
+ * Unwinds xnthread_init() ops for an unmapped thread.  Since the
+ * latter must be dormant, it can't be part of any runqueue.
+ */
+void __xnthread_discard(struct xnthread *thread)
+{
+	spl_t s;
+
+	secondary_mode_only();
+
+	xntimer_destroy(&thread->rtimer);
+	xntimer_destroy(&thread->ptimer);
+
+	xnlock_get_irqsave(&nklock, s);
+	if (!list_empty(&thread->glink)) {
+		list_del(&thread->glink);
+		cobalt_nrthreads--;
+		xnvfile_touch_tag(&nkthreadlist_tag);
+	}
+	xnthread_deregister(thread);
+	xnlock_put_irqrestore(&nklock, s);
+}
+
+/**
+ * @fn void xnthread_init(struct xnthread *thread,const struct xnthread_init_attr *attr,struct xnsched_class *sched_class,const union xnsched_policy_param *sched_param)
+ * @brief Initialize a new thread.
+ *
+ * Initializes a new thread. The thread is left dormant until it is
+ * actually started by xnthread_start().
+ *
+ * @param thread The address of a thread descriptor Cobalt will use to
+ * store the thread-specific data.  This descriptor must remain valid
+ * as long as the thread is active, therefore it must be allocated in
+ * permanent memory. @warning Some architectures may require the
+ * descriptor to be properly aligned in memory; this is an additional
+ * reason for not laying descriptors in the program stack, where
+ * alignment constraints might not always be satisfied.
+ *
+ * @param attr A pointer to an attribute block describing the initial
+ * properties of the new thread. Members of this structure are defined
+ * as follows:
+ *
+ * - name: An ASCII string standing for the symbolic name of the
+ * thread. This name is copied to a safe place into the thread
+ * descriptor. This name might be used in various situations by Cobalt
+ * for issuing human-readable diagnostic messages, so it is usually a
+ * good idea to provide a sensible value here.  NULL is fine though
+ * and means "anonymous".
+ *
+ * - flags: A set of creation flags affecting the operation. The
+ * following flags can be part of this bitmask:
+ *
+ *   - XNSUSP creates the thread in a suspended state. In such a case,
+ * the thread shall be explicitly resumed using the xnthread_resume()
+ * service for its execution to actually begin, in addition to
+ * issuing xnthread_start() for it. This flag can also be specified
+ * when invoking xnthread_start() as a starting mode.
+ *
+ * - XNUSER shall be set if @a thread will be mapped over an existing
+ * user-space task. Otherwise, a new kernel host task is created, then
+ * paired with the new Xenomai thread.
+ *
+ * - XNFPU (enable FPU) tells Cobalt that the new thread may use the
+ * floating-point unit. XNFPU is implicitly assumed for user-space
+ * threads even if not set in @a flags.
+ *
+ * - affinity: The processor affinity of this thread. Passing
+ * CPU_MASK_ALL means "any CPU" from the allowed core affinity mask
+ * (cobalt_cpu_affinity). Passing an empty set is invalid.
+ *
+ * @param sched_class The initial scheduling class the new thread
+ * should be assigned to.
+ *
+ * @param sched_param The initial scheduling parameters to set for the
+ * new thread; @a sched_param must be valid within the context of @a
+ * sched_class.
+ *
+ * @return 0 is returned on success. Otherwise, the following error
+ * code indicates the cause of the failure:
+ *
+ * - -EINVAL is returned if @a attr->flags has invalid bits set, or @a
+ *   attr->affinity is invalid (e.g. empty).
+ *
+ * @coretags{secondary-only}
+ */
+int xnthread_init(struct xnthread *thread,
+		  const struct xnthread_init_attr *attr,
+		  struct xnsched_class *sched_class,
+		  const union xnsched_policy_param *sched_param)
+{
+	struct xnsched *sched;
+	cpumask_t affinity;
+	int ret;
+
+	if (attr->flags & ~(XNFPU | XNUSER | XNSUSP))
+		return -EINVAL;
+
+	/*
+	 * Pick an initial CPU for the new thread which is part of its
+	 * affinity mask, and therefore also part of the supported
+	 * CPUs. This CPU may change in pin_to_initial_cpu().
+	 */
+	cpumask_and(&affinity, &attr->affinity, &cobalt_cpu_affinity);
+	if (cpumask_empty(&affinity))
+		return -EINVAL;
+
+	sched = xnsched_struct(cpumask_first(&affinity));
+
+	ret = __xnthread_init(thread, attr, sched, sched_class, sched_param);
+	if (ret)
+		return ret;
+
+	trace_cobalt_thread_init(thread, attr, sched_class);
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(xnthread_init);
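+
+/*
+ * A minimal initialization sketch for an RT-class Cobalt kernel
+ * thread (flags = 0, i.e. no XNUSER); demo_thread, demo_iattr,
+ * demo_param and demo_personality are hypothetical caller-side
+ * names:
+ *
+ *	static struct xnthread demo_thread;
+ *	struct xnthread_init_attr demo_iattr;
+ *	union xnsched_policy_param demo_param;
+ *	int ret;
+ *
+ *	demo_iattr.name = "demo";
+ *	demo_iattr.flags = 0;
+ *	demo_iattr.personality = &demo_personality;
+ *	demo_iattr.affinity = CPU_MASK_ALL;
+ *	demo_param.rt.prio = 50;
+ *
+ *	ret = xnthread_init(&demo_thread, &demo_iattr,
+ *			    &xnsched_class_rt, &demo_param);
+ */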
+
+/**
+ * @fn int xnthread_start(struct xnthread *thread,const struct xnthread_start_attr *attr)
+ * @brief Start a newly created thread.
+ *
+ * Starts a (newly) created thread, scheduling it for the first
+ * time. This call releases the target thread from the XNDORMANT
+ * state. This service also sets the initial mode for the new thread.
+ *
+ * @param thread The descriptor address of the started thread which
+ * must have been previously initialized by a call to xnthread_init().
+ *
+ * @param attr A pointer to an attribute block describing the
+ * execution properties of the new thread. Members of this structure
+ * are defined as follows:
+ *
+ * - mode: The initial thread mode. The following flags can be part of
+ * this bitmask:
+ *
+ *   - XNLOCK causes the thread to lock the scheduler when it starts.
+ * The target thread will have to call the xnsched_unlock()
+ * service to unlock the scheduler. A non-preemptible thread may still
+ * block, in which case, the lock is reasserted when the thread is
+ * scheduled back in.
+ *
+ *   - XNSUSP makes the thread start in a suspended state. In such a
+ * case, the thread will have to be explicitly resumed using the
+ * xnthread_resume() service for its execution to actually begin.
+ *
+ * - entry: The address of the thread's body routine. In other words,
+ * it is the thread entry point.
+ *
+ * - cookie: A user-defined opaque cookie Cobalt will pass to the
+ * emerging thread as the sole argument of its entry point.
+ *
+ * @retval 0 if @a thread could be started;
+ *
+ * @retval -EBUSY if @a thread was not dormant or stopped;
+ *
+ * @coretags{task-unrestricted, might-switch}
+ */
+int xnthread_start(struct xnthread *thread,
+		   const struct xnthread_start_attr *attr)
+{
+	spl_t s;
+
+	xnlock_get_irqsave(&nklock, s);
+
+	if (!xnthread_test_state(thread, XNDORMANT)) {
+		xnlock_put_irqrestore(&nklock, s);
+		return -EBUSY;
+	}
+
+	xnthread_set_state(thread, attr->mode & (XNTHREAD_MODE_BITS | XNSUSP));
+	thread->entry = attr->entry;
+	thread->cookie = attr->cookie;
+	if (attr->mode & XNLOCK)
+		thread->lock_count = 1;
+
+	/*
+	 * A user-space thread starts immediately Cobalt-wise since we
+	 * already have an underlying Linux context for it, so we can
+	 * enlist it now to make it visible from the /proc interface.
+	 */
+	if (xnthread_test_state(thread, XNUSER))
+		enlist_new_thread(thread);
+
+	trace_cobalt_thread_start(thread);
+
+	xnthread_resume(thread, XNDORMANT);
+	xnsched_run();
+
+	xnlock_put_irqrestore(&nklock, s);
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(xnthread_start);
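+
+/*
+ * A minimal start sketch for a thread previously set up with
+ * xnthread_init(); demo_thread and demo_entry are hypothetical:
+ *
+ *	static void demo_entry(void *cookie)
+ *	{
+ *		for (;;) {
+ *			... real-time work ...
+ *		}
+ *	}
+ *
+ *	struct xnthread_start_attr demo_sattr;
+ *	int ret;
+ *
+ *	demo_sattr.mode = 0;
+ *	demo_sattr.entry = demo_entry;
+ *	demo_sattr.cookie = NULL;
+ *	ret = xnthread_start(&demo_thread, &demo_sattr);
+ */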
+
+/**
+ * @fn void xnthread_set_mode(int clrmask,int setmask)
+ * @brief Change control mode of the current thread.
+ *
+ * Change the control mode of the current thread. The control mode
+ * affects several behaviours of the Cobalt core regarding this
+ * thread.
+ *
+ * @param clrmask Clears the corresponding bits from the control mode
+ * before setmask is applied. The scheduler lock held by the current
+ * thread can be forcibly released by passing the XNLOCK bit in this
+ * mask. In this case, the lock nesting count is also reset to zero.
+ *
+ * @param setmask The new thread mode. The following flags may be set
+ * in this bitmask:
+ *
+ * - XNLOCK makes the current thread non-preemptible by other threads.
+ * Unless XNTRAPLB is also set for the thread, the latter may still
+ * block, dropping the lock temporarily, in which case, the lock will
+ * be reacquired automatically when the thread resumes execution.
+ *
+ * - XNWARN enables debugging notifications for the current thread.  A
+ * SIGDEBUG (Linux-originated) signal is sent when the following
+ * atypical or abnormal behavior is detected:
+ *
+ *    - the current thread switches to secondary mode. Such notification
+ *      comes in handy for detecting spurious relaxes.
+ *
+ *    - CONFIG_XENO_OPT_DEBUG_MUTEX_RELAXED is enabled in the kernel
+ *      configuration, and the current thread is sleeping on a Cobalt
+ *      mutex currently owned by a thread running in secondary mode,
+ *      which reveals a priority inversion.
+ *
+ *    - the current thread is about to sleep while holding a Cobalt
+ *      mutex, and CONFIG_XENO_OPT_DEBUG_MUTEX_SLEEP is enabled in the
+ *      kernel configuration. Blocking for acquiring a mutex does not
+ *      trigger such a signal though.
+ *
+ *    - the current thread has both XNTRAPLB and XNLOCK set, and
+ *      attempts to block on a Cobalt service, which would cause a
+ *      lock break.
+ *
+ * - XNTRAPLB disallows breaking the scheduler lock. In the default
+ * case, a thread which holds the scheduler lock is allowed to drop it
+ * temporarily for sleeping. If this mode bit is set, such thread
+ * would return immediately with XNBREAK set from
+ * xnthread_suspend(). If XNWARN is set for the current thread,
+ * SIGDEBUG is sent in addition to raising the break condition.
+ *
+ * @coretags{primary-only, might-switch}
+ *
+ * @note Setting @a clrmask and @a setmask to zero leads to a nop,
+ * in which case xnthread_set_mode() returns the current mode.
+ */
+int xnthread_set_mode(int clrmask, int setmask)
+{
+	int oldmode, lock_count;
+	struct xnthread *curr;
+	spl_t s;
+
+	primary_mode_only();
+
+	xnlock_get_irqsave(&nklock, s);
+	curr = xnsched_current_thread();
+	oldmode = xnthread_get_state(curr) & XNTHREAD_MODE_BITS;
+	lock_count = curr->lock_count;
+	xnthread_clear_state(curr, clrmask & XNTHREAD_MODE_BITS);
+	xnthread_set_state(curr, setmask & XNTHREAD_MODE_BITS);
+	trace_cobalt_thread_set_mode(curr);
+
+	if (setmask & XNLOCK) {
+		if (lock_count == 0)
+			xnsched_lock();
+	} else if (clrmask & XNLOCK) {
+		if (lock_count > 0) {
+			curr->lock_count = 0;
+			xnthread_clear_localinfo(curr, XNLBALERT);
+			xnsched_run();
+		}
+	}
+
+	xnlock_put_irqrestore(&nklock, s);
+
+	if (lock_count > 0)
+		oldmode |= XNLOCK;
+
+	return oldmode;
+}
+EXPORT_SYMBOL_GPL(xnthread_set_mode);
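+
+/*
+ * A minimal sketch of a scheduler-locked section based on
+ * xnthread_set_mode(), for use from primary mode only:
+ *
+ *	int oldmode = xnthread_set_mode(0, XNLOCK);
+ *
+ *	... non-preemptible section ...
+ *
+ *	if ((oldmode & XNLOCK) == 0)
+ *		xnthread_set_mode(XNLOCK, 0);
+ */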
+
+/**
+ * @fn void xnthread_suspend(struct xnthread *thread, int mask,xnticks_t timeout, xntmode_t timeout_mode,struct xnsynch *wchan)
+ * @brief Suspend a thread.
+ *
+ * Suspends the execution of a thread according to a given suspensive
+ * condition. This thread will not be eligible for scheduling until
+ * all the pending suspensive conditions set by this service have
+ * been removed by one or more calls to xnthread_resume().
+ *
+ * @param thread The descriptor address of the suspended thread.
+ *
+ * @param mask The suspension mask specifying the suspensive condition
+ * to add to the thread's wait mask. Possible values usable by the
+ * caller are:
+ *
+ * - XNSUSP. This flag forcibly suspends a thread, regardless of any
+ * resource to wait for. A reverse call to xnthread_resume()
+ * specifying the XNSUSP bit must be issued to remove this condition,
+ * which is cumulative with other suspension bits. @a wchan should be
+ * NULL when using this suspending mode.
+ *
+ * - XNDELAY. This flag denotes a counted delay wait (in ticks) whose
+ * duration is defined by the value of the timeout parameter.
+ *
+ * - XNPEND. This flag denotes a wait for a synchronization object to
+ * be signaled. The wchan argument must point to this object. A
+ * timeout value can be passed to bound the wait. This suspending mode
+ * should not be used directly by the client interface, but rather
+ * through the xnsynch_sleep_on() call.
+ *
+ * @param timeout The timeout which may be used to limit the time the
+ * thread pends on a resource. This value is a wait time given in
+ * nanoseconds. It can either be relative, absolute monotonic, or
+ * absolute adjustable depending on @a timeout_mode.
+ *
+ * Passing XN_INFINITE @b and setting @a timeout_mode to XN_RELATIVE
+ * specifies an unbounded wait. All other values are used to
+ * initialize a watchdog timer. If the current operation mode of the
+ * system timer is oneshot and @a timeout elapses before
+ * xnthread_suspend() has completed, then the target thread will not
+ * be suspended, and this routine has no effect.
+ *
+ * @param timeout_mode The mode of the @a timeout parameter. It can
+ * either be set to XN_RELATIVE, XN_ABSOLUTE, or XN_REALTIME (see also
+ * xntimer_start()).
+ *
+ * @param wchan The address of a pended resource. This parameter is
+ * used internally by the synchronization object implementation code
+ * to specify on which object the suspended thread pends. NULL is a
+ * legitimate value when this parameter does not apply to the current
+ * suspending mode (e.g. XNSUSP).
+ *
+ * @note If the target thread has received a Linux-originated signal,
+ * then this service immediately exits without suspending the thread,
+ * but raises the XNBREAK condition in its information mask.
+ *
+ * @coretags{unrestricted, might-switch}
+ */
+void xnthread_suspend(struct xnthread *thread, int mask,
+		      xnticks_t timeout, xntmode_t timeout_mode,
+		      struct xnsynch *wchan)
+{
+	unsigned long oldstate;
+	struct xnsched *sched;
+	spl_t s;
+
+	/* No, you certainly do not want to suspend the root thread. */
+	XENO_BUG_ON(COBALT, xnthread_test_state(thread, XNROOT));
+	/* No built-in support for conjunctive wait. */
+	XENO_BUG_ON(COBALT, wchan && thread->wchan);
+
+	xnlock_get_irqsave(&nklock, s);
+
+	trace_cobalt_thread_suspend(thread, mask, timeout, timeout_mode, wchan);
+
+	sched = thread->sched;
+	oldstate = thread->state;
+
+	/*
+	 * If attempting to suspend a runnable thread which is pending
+	 * a forced switch to secondary mode (XNKICKED), just raise
+	 * the XNBREAK status and return immediately, except if we
+	 * are precisely doing such switch by applying XNRELAX.
+	 *
+	 * In the latter case, we also make sure to clear XNKICKED,
+	 * since we won't go through prepare_for_signal() once
+	 * relaxed.
+	 */
+	if (likely((oldstate & XNTHREAD_BLOCK_BITS) == 0)) {
+		if (likely((mask & XNRELAX) == 0)) {
+			if (xnthread_test_info(thread, XNKICKED))
+				goto abort;
+			if (thread == sched->curr &&
+			    thread->lock_count > 0 &&
+			    (oldstate & XNTRAPLB) != 0)
+				goto lock_break;
+		}
+		/*
+		 * Do not destroy the info left behind by yet unprocessed
+		 * wakeups when suspending a remote thread.
+		 */
+		if (thread == sched->curr)
+			xnthread_clear_info(thread, XNRMID|XNTIMEO|XNBREAK|
+						    XNWAKEN|XNROBBED|XNKICKED);
+	}
+
+	/*
+	 * Don't start the timer for a thread delayed indefinitely.
+	 */
+	if (timeout != XN_INFINITE || timeout_mode != XN_RELATIVE) {
+		xntimer_set_affinity(&thread->rtimer, thread->sched);
+		if (xntimer_start(&thread->rtimer, timeout, XN_INFINITE,
+				  timeout_mode)) {
+			/* (absolute) timeout value in the past, bail out. */
+			if (wchan) {
+				thread->wchan = wchan;
+				xnsynch_forget_sleeper(thread);
+			}
+			xnthread_set_info(thread, XNTIMEO);
+			goto out;
+		}
+		xnthread_set_state(thread, XNDELAY);
+	}
+
+	if (oldstate & XNREADY) {
+		xnsched_dequeue(thread);
+		xnthread_clear_state(thread, XNREADY);
+	}
+
+	xnthread_set_state(thread, mask);
+
+	/*
+	 * We must make sure that we don't clear the wait channel if a
+	 * thread is first blocked (wchan != NULL) then forcibly
+	 * suspended (wchan == NULL), since these are conjunctive
+	 * conditions.
+	 */
+	if (wchan)
+		thread->wchan = wchan;
+
+	if (likely(thread == sched->curr)) {
+		xnsched_set_resched(sched);
+		/*
+		 * Transition to secondary mode (XNRELAX) is a
+		 * separate path which is only available to
+		 * xnthread_relax(). Using __xnsched_run() there for
+		 * rescheduling allows us to break the scheduler lock
+		 * temporarily.
+		 */
+		if (unlikely(mask & XNRELAX)) {
+			pipeline_leave_oob_unlock();
+			__xnsched_run(sched);
+			return;
+		}
+		/*
+		 * If the thread is running on a remote CPU,
+		 * xnsched_run() will trigger the IPI as required.  In
+		 * this case, sched refers to a remote runqueue, so
+		 * make sure to always kick the rescheduling procedure
+		 * for the local one.
+		 */
+		__xnsched_run(xnsched_current());
+		goto out;
+	}
+
+	/*
+	 * Ok, this one is an interesting corner case, which requires
+	 * a bit of background first. Here, we handle the case of
+	 * suspending a _relaxed_ user shadow which is _not_ the
+	 * current thread.
+	 *
+	 *  The net effect is that we are attempting to stop the
+	 * shadow thread for Cobalt, whilst this thread is actually
+	 * running some code under the control of the Linux scheduler
+	 * (i.e. it's relaxed).
+	 *
+	 *  To make this possible, we force the target Linux task to
+	 * migrate back to the Xenomai domain by sending it a
+	 * SIGSHADOW signal which the interface libraries trap for
+	 * this specific internal purpose; the signal handler is
+	 * expected to call back Cobalt's migration service.
+	 *
+	 * By forcing this migration, we make sure that Cobalt
+	 * controls, hence properly stops, the target thread according
+	 * to the requested suspension condition. Otherwise, the
+	 * shadow thread in secondary mode would just keep running
+	 * into the Linux domain, thus breaking the most common
+	 * assumptions regarding suspended threads.
+	 *
+	 * We only care for threads that are not current, and for
+	 * XNSUSP, XNDELAY, XNDORMANT and XNHELD conditions, because:
+	 *
+	 * - There is no point in dealing with a relaxed thread which
+	 * is current, since personalities have to ask for primary
+	 * mode switch when processing any syscall which may block the
+	 * caller (i.e. __xn_exec_primary).
+	 *
+	 * - among all blocking bits (XNTHREAD_BLOCK_BITS), only
+	 * XNSUSP, XNDELAY, XNHELD and XNDBGSTOP may be applied by the
+	 * current thread to a non-current thread. XNPEND is always
+	 * added by the caller to its own state, XNMIGRATE, XNRELAX
+	 * and XNDBGSTOP have special semantics escaping this issue.
+	 *
+	 * We don't signal threads which are already in a dormant
+	 * state, since they are suspended by definition.
+	 */
+	if (((oldstate & (XNTHREAD_BLOCK_BITS|XNUSER)) == (XNRELAX|XNUSER)) &&
+	    (mask & (XNDELAY | XNSUSP | XNHELD)) != 0)
+		__xnthread_signal(thread, SIGSHADOW, SIGSHADOW_ACTION_HARDEN);
+out:
+	xnlock_put_irqrestore(&nklock, s);
+	return;
+
+lock_break:
+	/* NOTE: thread is current */
+	if (xnthread_test_state(thread, XNWARN) &&
+	    !xnthread_test_localinfo(thread, XNLBALERT)) {
+		xnthread_set_info(thread, XNKICKED);
+		xnthread_set_localinfo(thread, XNLBALERT);
+		__xnthread_signal(thread, SIGDEBUG, SIGDEBUG_LOCK_BREAK);
+	}
+abort:
+	if (wchan) {
+		thread->wchan = wchan;
+		xnsynch_forget_sleeper(thread);
+	}
+	xnthread_clear_info(thread, XNRMID | XNTIMEO);
+	xnthread_set_info(thread, XNBREAK);
+	xnlock_put_irqrestore(&nklock, s);
+}
+EXPORT_SYMBOL_GPL(xnthread_suspend);
+
+/**
+ * @fn void xnthread_resume(struct xnthread *thread,int mask)
+ * @brief Resume a thread.
+ *
+ * Resumes the execution of a thread previously suspended by one or
+ * more calls to xnthread_suspend(). This call removes a suspensive
+ * condition affecting the target thread. When all suspensive
+ * conditions are gone, the thread is left in a READY state at which
+ * point it becomes eligible anew for scheduling.
+ *
+ * @param thread The descriptor address of the resumed thread.
+ *
+ * @param mask The suspension mask specifying the suspensive condition
+ * to remove from the thread's wait mask. Possible values usable by
+ * the caller are:
+ *
+ * - XNSUSP. This flag removes the explicit suspension condition. This
+ * condition might be additive to the XNPEND condition.
+ *
+ * - XNDELAY. This flag removes the counted delay wait condition.
+ *
+ * - XNPEND. This flag removes the resource wait condition. If a
+ * watchdog is armed, it is automatically disarmed by this
+ * call. Unlike the two previous conditions, only the current thread
+ * can set this condition for itself, i.e. no thread can force another
+ * one to pend on a resource.
+ *
+ * When the thread is eventually resumed by one or more calls to
+ * xnthread_resume(), the caller of xnthread_suspend() in the awakened
+ * thread that suspended itself should check for the following bits in
+ * its own information mask to determine what caused its wake up:
+ *
+ * - XNRMID means that the caller must assume that the pended
+ * synchronization object has been destroyed (see xnsynch_flush()).
+ *
+ * - XNTIMEO means that the delay elapsed, or the watchdog went off
+ * before the corresponding synchronization object was signaled.
+ *
+ * - XNBREAK means that the wait has been forcibly broken by a call to
+ * xnthread_unblock().
+ *
+ * @coretags{unrestricted, might-switch}
+ */
+void xnthread_resume(struct xnthread *thread, int mask)
+{
+	unsigned long oldstate;
+	struct xnsched *sched;
+	spl_t s;
+
+	xnlock_get_irqsave(&nklock, s);
+
+	trace_cobalt_thread_resume(thread, mask);
+
+	xntrace_pid(xnthread_host_pid(thread), xnthread_current_priority(thread));
+
+	sched = thread->sched;
+	oldstate = thread->state;
+
+	if ((oldstate & XNTHREAD_BLOCK_BITS) == 0) {
+		if (oldstate & XNREADY)
+			xnsched_dequeue(thread);
+		goto enqueue;
+	}
+
+	/* Clear the specified block bit(s) */
+	xnthread_clear_state(thread, mask);
+
+	/*
+	 * If XNDELAY was set in the clear mask, xnthread_unblock()
+	 * was called for the thread, or a timeout has elapsed. In the
+	 * latter case, stopping the timer is a no-op.
+	 */
+	if (mask & XNDELAY)
+		xntimer_stop(&thread->rtimer);
+
+	if (!xnthread_test_state(thread, XNTHREAD_BLOCK_BITS))
+		goto clear_wchan;
+
+	if (mask & XNDELAY) {
+		mask = xnthread_test_state(thread, XNPEND);
+		if (mask == 0)
+			goto unlock_and_exit;
+		if (thread->wchan)
+			xnsynch_forget_sleeper(thread);
+		goto recheck_state;
+	}
+
+	if (xnthread_test_state(thread, XNDELAY)) {
+		if (mask & XNPEND) {
+			/*
+			 * A resource became available to the thread.
+			 * Cancel the watchdog timer.
+			 */
+			xntimer_stop(&thread->rtimer);
+			xnthread_clear_state(thread, XNDELAY);
+		}
+		goto recheck_state;
+	}
+
+	/*
+	 * The thread is still suspended, but is no more pending on a
+	 * resource.
+	 */
+	if ((mask & XNPEND) != 0 && thread->wchan)
+		xnsynch_forget_sleeper(thread);
+
+	goto unlock_and_exit;
+
+recheck_state:
+	if (xnthread_test_state(thread, XNTHREAD_BLOCK_BITS))
+		goto unlock_and_exit;
+
+clear_wchan:
+	if ((mask & ~XNDELAY) != 0 && thread->wchan != NULL)
+		/*
+		 * If the thread was actually suspended, clear the
+		 * wait channel.  -- this allows requests like
+		 * xnthread_suspend(thread,XNDELAY,...)  not to run
+		 * the following code when the suspended thread is
+		 * woken up while undergoing a simple delay.
+		 */
+		xnsynch_forget_sleeper(thread);
+
+	if (unlikely((oldstate & mask) & XNHELD)) {
+		xnsched_requeue(thread);
+		goto ready;
+	}
+enqueue:
+	xnsched_enqueue(thread);
+ready:
+	xnthread_set_state(thread, XNREADY);
+	xnsched_set_resched(sched);
+unlock_and_exit:
+	xnlock_put_irqrestore(&nklock, s);
+}
+EXPORT_SYMBOL_GPL(xnthread_resume);
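+
+/*
+ * A minimal sketch pairing xnthread_suspend() and xnthread_resume()
+ * for a forcible suspension; demo_thread is hypothetical:
+ *
+ *	xnthread_suspend(demo_thread, XNSUSP,
+ *			 XN_INFINITE, XN_RELATIVE, NULL);
+ *
+ *	... later, once the blocking condition is lifted ...
+ *
+ *	xnthread_resume(demo_thread, XNSUSP);
+ *	xnsched_run();
+ */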
+
+/**
+ * @fn int xnthread_unblock(struct xnthread *thread)
+ * @brief Unblock a thread.
+ *
+ * Breaks the thread out of any wait it is currently in.  This call
+ * removes the XNDELAY and XNPEND suspensive conditions previously put
+ * by xnthread_suspend() on the target thread. If all suspensive
+ * conditions are gone, the thread is left in a READY state at which
+ * point it becomes eligible anew for scheduling.
+ *
+ * @param thread The descriptor address of the unblocked thread.
+ *
+ * This call does not release the thread from the XNSUSP, XNRELAX,
+ * XNDORMANT or XNHELD suspensive conditions.
+ *
+ * When the thread resumes execution, the XNBREAK bit is set in the
+ * unblocked thread's information mask. Unblocking a non-blocked
+ * thread is perfectly harmless.
+ *
+ * @return non-zero is returned if the thread was actually unblocked
+ * from a pending wait state, 0 otherwise.
+ *
+ * @coretags{unrestricted, might-switch}
+ */
+int xnthread_unblock(struct xnthread *thread)
+{
+	int ret = 1;
+	spl_t s;
+
+	/*
+	 * Attempt to abort an undergoing wait for the given thread.
+	 * If this state is due to an alarm that has been armed to
+	 * limit the sleeping thread's waiting time while it pends for
+	 * a resource, the corresponding XNPEND state will be cleared
+	 * by xnthread_resume() in the same move. Otherwise, this call
+	 * may abort an undergoing infinite wait for a resource (if
+	 * any).
+	 */
+	xnlock_get_irqsave(&nklock, s);
+
+	trace_cobalt_thread_unblock(thread);
+
+	if (xnthread_test_state(thread, XNDELAY))
+		xnthread_resume(thread, XNDELAY);
+	else if (xnthread_test_state(thread, XNPEND))
+		xnthread_resume(thread, XNPEND);
+	else
+		ret = 0;
+
+	/*
+	 * We should not clear a previous break state if this service
+	 * is called more than once before the target thread actually
+	 * resumes, so we only set the bit here and never clear
+	 * it. However, we must not raise the XNBREAK bit if the
+	 * target thread was already awake at the time of this call,
+	 * so that downstream code does not get confused by some
+	 * "successful but interrupted syscall" condition. IOW, a
+	 * break state raised here must always trigger an error code
+	 * downstream, and an already successful syscall cannot be
+	 * marked as interrupted.
+	 */
+	if (ret)
+		xnthread_set_info(thread, XNBREAK);
+
+	xnlock_put_irqrestore(&nklock, s);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(xnthread_unblock);
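+
+/*
+ * A minimal sketch of xnthread_unblock(); demo_thread is
+ * hypothetical. The unblocker side:
+ *
+ *	if (xnthread_unblock(demo_thread))
+ *		xnsched_run();
+ *
+ * The unblocked side, after its call to xnthread_suspend() returns:
+ *
+ *	if (xnthread_test_info(xnthread_current(), XNBREAK))
+ *		return -EINTR;
+ */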
+
+/**
+ * @fn int xnthread_set_periodic(struct xnthread *thread,xnticks_t idate, xntmode_t timeout_mode, xnticks_t period)
+ * @brief Make a thread periodic.
+ *
+ * Make a thread periodic by programming its first release point and
+ * its period in the processor time line.  Subsequent calls to
+ * xnthread_wait_period() will delay the thread until the next
+ * periodic release point in the processor timeline is reached.
+ *
+ * @param thread The core thread to make periodic. If NULL, the
+ * current thread is assumed.
+ *
+ * @param idate The initial (absolute) date of the first release
+ * point, expressed in nanoseconds. The affected thread will be
+ * delayed by the first call to xnthread_wait_period() until this
+ * point is reached. If @a idate is equal to XN_INFINITE, the first
+ * release point is set to @a period nanoseconds after the current
+ * date. In the latter case, @a timeout_mode is not considered and can
+ * have any valid value.
+ *
+ * @param timeout_mode The mode of the @a idate parameter. It can
+ * either be set to XN_ABSOLUTE or XN_REALTIME with @a idate different
+ * from XN_INFINITE (see also xntimer_start()).
+ *
+ * @param period The period of the thread, expressed in nanoseconds.
+ * As a side-effect, passing XN_INFINITE attempts to stop the thread's
+ * periodic timer; in the latter case, the routine always exits
+ * successfully, regardless of the previous state of this timer.
+ *
+ * @return 0 is returned upon success. Otherwise:
+ *
+ * - -ETIMEDOUT is returned if @a idate is different from XN_INFINITE and
+ * represents a date in the past.
+ *
+ * - -EINVAL is returned if @a period is different from XN_INFINITE
+ * but shorter than the scheduling latency value for the target
+ * system, as available from /proc/xenomai/latency. -EINVAL is also
+ * returned if @a timeout_mode is not compatible with @a idate, such
+ * as XN_RELATIVE with @a idate different from XN_INFINITE.
+ *
+ * - -EPERM is returned if @a thread is NULL, but the caller is not a
+ * Xenomai thread.
+ *
+ * @coretags{task-unrestricted}
+ */
+int xnthread_set_periodic(struct xnthread *thread, xnticks_t idate,
+			  xntmode_t timeout_mode, xnticks_t period)
+{
+	int ret = 0;
+	spl_t s;
+
+	if (thread == NULL) {
+		thread = xnthread_current();
+		if (thread == NULL)
+			return -EPERM;
+	}
+
+	xnlock_get_irqsave(&nklock, s);
+
+	if (period == XN_INFINITE) {
+		if (xntimer_running_p(&thread->ptimer))
+			xntimer_stop(&thread->ptimer);
+
+		goto unlock_and_exit;
+	}
+
+	/*
+	 * LART: detect periods which are shorter than the core clock
+	 * gravity for kernel thread timers. This can't work, caller
+	 * must have messed up arguments.
+	 */
+	if (period < xnclock_ticks_to_ns(&nkclock,
+			 xnclock_get_gravity(&nkclock, kernel))) {
+		ret = -EINVAL;
+		goto unlock_and_exit;
+	}
+
+	xntimer_set_affinity(&thread->ptimer, thread->sched);
+
+	if (idate == XN_INFINITE)
+		xntimer_start(&thread->ptimer, period, period, XN_RELATIVE);
+	else {
+		if (timeout_mode == XN_REALTIME)
+			idate -= xnclock_get_offset(xntimer_clock(&thread->ptimer));
+		else if (timeout_mode != XN_ABSOLUTE) {
+			ret = -EINVAL;
+			goto unlock_and_exit;
+		}
+		ret = xntimer_start(&thread->ptimer, idate, period,
+				    XN_ABSOLUTE);
+	}
+
+unlock_and_exit:
+	xnlock_put_irqrestore(&nklock, s);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(xnthread_set_periodic);
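+
+/*
+ * A minimal sketch of xnthread_set_periodic() for the calling
+ * thread, with the first release point one period from now and an
+ * arbitrary 1 ms period:
+ *
+ *	ret = xnthread_set_periodic(NULL, XN_INFINITE,
+ *				    XN_RELATIVE, 1000000);
+ *
+ * Periodic mode is stopped again by passing XN_INFINITE as the
+ * period:
+ *
+ *	xnthread_set_periodic(NULL, XN_INFINITE,
+ *			      XN_RELATIVE, XN_INFINITE);
+ */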
+
+/**
+ * @fn int xnthread_wait_period(unsigned long *overruns_r)
+ * @brief Wait for the next periodic release point.
+ *
+ * Make the current thread wait for the next periodic release point in
+ * the processor time line.
+ *
+ * @param overruns_r If non-NULL, @a overruns_r must be a pointer to a
+ * memory location which will be written with the count of pending
+ * overruns. This value is copied only when xnthread_wait_period()
+ * returns -ETIMEDOUT or success; the memory location remains
+ * unmodified otherwise. If NULL, this count will never be copied
+ * back.
+ *
+ * @return 0 is returned upon success; if @a overruns_r is valid, zero
+ * is copied to the pointed memory location. Otherwise:
+ *
+ * - -EWOULDBLOCK is returned if xnthread_set_periodic() has not
+ * previously been called for the calling thread.
+ *
+ * - -EINTR is returned if xnthread_unblock() has been called for the
+ * waiting thread before the next periodic release point has been
+ * reached. In this case, the overrun counter is reset too.
+ *
+ * - -ETIMEDOUT is returned if the timer has overrun, which indicates
+ * that one or more previous release points have been missed by the
+ * calling thread. If @a overruns_r is valid, the count of pending
+ * overruns is copied to the pointed memory location.
+ *
+ * @coretags{primary-only, might-switch}
+ */
+int xnthread_wait_period(unsigned long *overruns_r)
+{
+	unsigned long overruns = 0;
+	struct xnthread *thread;
+	struct xnclock *clock;
+	xnticks_t now;
+	int ret = 0;
+	spl_t s;
+
+	thread = xnthread_current();
+
+	xnlock_get_irqsave(&nklock, s);
+
+	if (unlikely(!xntimer_running_p(&thread->ptimer))) {
+		ret = -EWOULDBLOCK;
+		goto out;
+	}
+
+	trace_cobalt_thread_wait_period(thread);
+
+	clock = xntimer_clock(&thread->ptimer);
+	now = xnclock_read_raw(clock);
+	if (likely((xnsticks_t)(now - xntimer_pexpect(&thread->ptimer)) < 0)) {
+		xnthread_suspend(thread, XNDELAY, XN_INFINITE, XN_RELATIVE, NULL);
+		if (unlikely(xnthread_test_info(thread, XNBREAK))) {
+			ret = -EINTR;
+			goto out;
+		}
+
+		now = xnclock_read_raw(clock);
+	}
+
+	overruns = xntimer_get_overruns(&thread->ptimer, thread, now);
+	if (overruns) {
+		ret = -ETIMEDOUT;
+		trace_cobalt_thread_missed_period(thread);
+	}
+
+	if (likely(overruns_r != NULL))
+		*overruns_r = overruns;
+ out:
+	xnlock_put_irqrestore(&nklock, s);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(xnthread_wait_period);
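+
+/*
+ * A minimal periodic loop sketch combining xnthread_set_periodic()
+ * and xnthread_wait_period(); do_demo_work() and the 1 ms period are
+ * hypothetical:
+ *
+ *	unsigned long overruns;
+ *	int ret;
+ *
+ *	ret = xnthread_set_periodic(NULL, XN_INFINITE,
+ *				    XN_RELATIVE, 1000000);
+ *	if (ret)
+ *		return ret;
+ *
+ *	for (;;) {
+ *		ret = xnthread_wait_period(&overruns);
+ *		if (ret == -ETIMEDOUT)
+ *			printk("missed %lu release points\n", overruns);
+ *		else if (ret)
+ *			break;
+ *		do_demo_work();
+ *	}
+ */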
+
+/**
+ * @fn int xnthread_set_slice(struct xnthread *thread, xnticks_t quantum)
+ * @brief Set thread time-slicing information.
+ *
+ * Update the time-slicing information for a given thread. This
+ * service enables or disables round-robin scheduling for the thread,
+ * depending on the value of @a quantum. By default, time-slicing is
+ * disabled for a new thread initialized by a call to xnthread_init().
+ *
+ * @param thread The descriptor address of the affected thread.
+ *
+ * @param quantum The time quantum assigned to the thread expressed in
+ * nanoseconds. If @a quantum is different from XN_INFINITE, the
+ * time-slice for the thread is set to that value and its current time
+ * credit is refilled (i.e. the thread is given a full time-slice to
+ * run next). Otherwise, if @a quantum equals XN_INFINITE,
+ * time-slicing is stopped for that thread.
+ *
+ * @return 0 is returned upon success. Otherwise, -EINVAL is returned
+ * if @a quantum is not XN_INFINITE and:
+ *
+ *   - the base scheduling class of the target thread does not support
+ *   time-slicing,
+ *
+ *   - @a quantum is smaller than the master clock gravity for a user
+ * thread, which denotes a spurious value.
+ *
+ * @coretags{task-unrestricted}
+ */
+int xnthread_set_slice(struct xnthread *thread, xnticks_t quantum)
+{
+	struct xnsched *sched;
+	spl_t s;
+
+	xnlock_get_irqsave(&nklock, s);
+
+	sched = thread->sched;
+	thread->rrperiod = quantum;
+
+	if (quantum != XN_INFINITE) {
+		if (quantum <= xnclock_get_gravity(&nkclock, user) ||
+		    thread->base_class->sched_tick == NULL) {
+			xnlock_put_irqrestore(&nklock, s);
+			return -EINVAL;
+		}
+		xnthread_set_state(thread, XNRRB);
+		if (sched->curr == thread)
+			xntimer_start(&sched->rrbtimer,
+				      quantum, XN_INFINITE, XN_RELATIVE);
+	} else {
+		xnthread_clear_state(thread, XNRRB);
+		if (sched->curr == thread)
+			xntimer_stop(&sched->rrbtimer);
+	}
+
+	xnlock_put_irqrestore(&nklock, s);
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(xnthread_set_slice);
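+
+/*
+ * A minimal sketch enabling an arbitrary 2 ms round-robin quantum
+ * for a thread, then disabling time-slicing again; demo_thread is
+ * hypothetical:
+ *
+ *	ret = xnthread_set_slice(demo_thread, 2000000);
+ *
+ *	...
+ *
+ *	xnthread_set_slice(demo_thread, XN_INFINITE);
+ */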
+
+/**
+ * @fn void xnthread_cancel(struct xnthread *thread)
+ * @brief Cancel a thread.
+ *
+ * Request cancellation of a thread. This service forces @a thread to
+ * exit from any blocking call, then to switch to secondary mode.
+ * @a thread will terminate as soon as it reaches a cancellation
+ * point. Cancellation points are defined for the following
+ * situations:
+ *
+ * - @a thread self-cancels by a call to xnthread_cancel().
+ * - @a thread invokes a Linux syscall (user-space shadow only).
+ * - @a thread receives a Linux signal (user-space shadow only).
+ * - @a thread unblocks from a Xenomai syscall (user-space shadow only).
+ * - @a thread attempts to block on a Xenomai syscall (user-space shadow only).
+ * - @a thread explicitly calls xnthread_test_cancel().
+ *
+ * @param thread The descriptor address of the thread to terminate.
+ *
+ * @coretags{task-unrestricted, might-switch}
+ *
+ * @note In addition to the common actions taken upon cancellation, a
+ * thread which belongs to the SCHED_WEAK class is sent a regular
+ * SIGTERM signal.
+ */
+void xnthread_cancel(struct xnthread *thread)
+{
+	spl_t s;
+
+	/* Right, so you want to kill the kernel?! */
+	XENO_BUG_ON(COBALT, xnthread_test_state(thread, XNROOT));
+
+	xnlock_get_irqsave(&nklock, s);
+
+	if (xnthread_test_info(thread, XNCANCELD))
+		goto check_self_cancel;
+
+	trace_cobalt_thread_cancel(thread);
+
+	xnthread_set_info(thread, XNCANCELD);
+
+	/*
+	 * If @thread is not started yet, fake a start request,
+	 * raising the kicked condition bit to make sure it will reach
+	 * xnthread_test_cancel() on its wakeup path.
+	 */
+	if (xnthread_test_state(thread, XNDORMANT)) {
+		xnthread_set_info(thread, XNKICKED);
+		xnthread_resume(thread, XNDORMANT);
+		goto out;
+	}
+
+check_self_cancel:
+	if (xnthread_current() == thread) {
+		xnlock_put_irqrestore(&nklock, s);
+		xnthread_test_cancel();
+		/*
+		 * May return if on behalf of an IRQ handler which has
+		 * preempted @thread.
+		 */
+		return;
+	}
+
+	/*
+	 * Force the non-current thread to exit:
+	 *
+	 * - unblock a user thread, switch it to weak scheduling,
+	 * then send it SIGTERM.
+	 *
+	 * - just unblock a kernel thread, it is expected to reach a
+	 * cancellation point soon after
+	 * (i.e. xnthread_test_cancel()).
+	 */
+	if (xnthread_test_state(thread, XNUSER)) {
+		__xnthread_demote(thread);
+		__xnthread_signal(thread, SIGTERM, 0);
+	} else
+		__xnthread_kick(thread);
+out:
+	xnsched_run();
+
+	xnlock_put_irqrestore(&nklock, s);
+}
+EXPORT_SYMBOL_GPL(xnthread_cancel);
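+
+/*
+ * A minimal sketch of a cancellable kernel thread body, polling for
+ * a pending cancellation request at each iteration; demo_entry,
+ * demo_thread and do_demo_work() are hypothetical:
+ *
+ *	static void demo_entry(void *cookie)
+ *	{
+ *		for (;;) {
+ *			xnthread_test_cancel();
+ *			do_demo_work();
+ *		}
+ *	}
+ *
+ * Some other context requests termination with:
+ *
+ *	xnthread_cancel(&demo_thread);
+ */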
+
+struct wait_grace_struct {
+	struct completion done;
+	struct rcu_head rcu;
+};
+
+static void grace_elapsed(struct rcu_head *head)
+{
+	struct wait_grace_struct *wgs;
+
+	wgs = container_of(head, struct wait_grace_struct, rcu);
+	complete(&wgs->done);
+}
+
+static void wait_for_rcu_grace_period(struct pid *pid)
+{
+	struct wait_grace_struct wait = {
+		.done = COMPLETION_INITIALIZER_ONSTACK(wait.done),
+	};
+	struct task_struct *p;
+
+	init_rcu_head_on_stack(&wait.rcu);
+
+	for (;;) {
+		call_rcu(&wait.rcu, grace_elapsed);
+		wait_for_completion(&wait.done);
+		if (pid == NULL)
+			break;
+		rcu_read_lock();
+		p = pid_task(pid, PIDTYPE_PID);
+		rcu_read_unlock();
+		if (p == NULL)
+			break;
+		reinit_completion(&wait.done);
+	}
+}
+
+/**
+ * @fn void xnthread_join(struct xnthread *thread, bool uninterruptible)
+ * @brief Join with a terminated thread.
+ *
+ * This service waits for @a thread to terminate after a call to
+ * xnthread_cancel().  If that thread has already terminated or is
+ * dormant at the time of the call, then xnthread_join() returns
+ * immediately.
+ *
+ * xnthread_join() adapts to the calling context (primary or
+ * secondary), switching to secondary mode if needed for the duration
+ * of the wait. Upon return, the original runtime mode is restored,
+ * unless a Linux signal is pending.
+ *
+ * @param thread The descriptor address of the thread to join with.
+ *
+ * @param uninterruptible Boolean telling whether the service should
+ * wait for completion uninterruptibly.
+ *
+ * @return 0 is returned on success. Otherwise, the following error
+ * codes indicate the cause of the failure:
+ *
+ * - -EDEADLK is returned if the current thread attempts to join
+ * itself.
+ *
+ * - -EINTR is returned if the current thread was unblocked while
+ *   waiting for @a thread to terminate.
+ *
+ * - -EBUSY indicates that another thread is already waiting for @a
+ *   thread to terminate.
+ *
+ * @coretags{task-unrestricted, might-switch}
+ */
+int xnthread_join(struct xnthread *thread, bool uninterruptible)
+{
+	struct xnthread *curr = xnthread_current();
+	int ret = 0, switched = 0;
+	struct pid *pid;
+	pid_t tpid;
+	spl_t s;
+
+	XENO_BUG_ON(COBALT, xnthread_test_state(thread, XNROOT));
+
+	if (thread == curr)
+		return -EDEADLK;
+
+	xnlock_get_irqsave(&nklock, s);
+
+	if (xnthread_test_state(thread, XNJOINED)) {
+		ret = -EBUSY;
+		goto out;
+	}
+
+	if (xnthread_test_info(thread, XNDORMANT))
+		goto out;
+
+	trace_cobalt_thread_join(thread);
+
+	xnthread_set_state(thread, XNJOINED);
+	tpid = xnthread_host_pid(thread);
+
+	if (curr && !xnthread_test_state(curr, XNRELAX)) {
+		xnlock_put_irqrestore(&nklock, s);
+		xnthread_relax(0, 0);
+		switched = 1;
+	} else
+		xnlock_put_irqrestore(&nklock, s);
+
+	/*
+	 * Since in theory, we might be sleeping there for a long
+	 * time, we get a reference on the pid struct holding our
+	 * target, then we check for its existence upon wake up.
+	 */
+	pid = find_get_pid(tpid);
+	if (pid == NULL)
+		goto done;
+
+	/*
+	 * We have a tricky issue to deal with, which involves code
+	 * relying on the assumption that a destroyed thread will have
+	 * scheduled away from do_exit() before xnthread_join()
+	 * returns. A typical example is illustrated by the following
+	 * sequence, with a RTDM kernel task implemented in a
+	 * dynamically loaded module:
+	 *
+	 * CPU0:  rtdm_task_destroy(ktask)
+	 *           xnthread_cancel(ktask)
+	 *           xnthread_join(ktask)
+	 *        ...<back to user>..
+	 *        rmmod(module)
+	 *
+	 * CPU1:  in ktask()
+	 *        ...
+	 *        ...
+	 *          __xnthread_test_cancel()
+	 *             do_exit()
+         *                schedule()
+	 *
+	 * In such a sequence, the code on CPU0 would expect the RTDM
+	 * task to have scheduled away upon return from
+	 * rtdm_task_destroy(), so that unmapping the destroyed task
+	 * code and data memory when unloading the module is always
+	 * safe.
+	 *
+	 * To address this, the joiner first waits for the joinee to
+	 * signal completion from the Cobalt thread cleanup handler
+	 * (__xnthread_cleanup), then waits for a full RCU grace
+	 * period to have elapsed. Since the completion signal is sent
+	 * on behalf of do_exit(), we may assume that the joinee has
+	 * scheduled away before the RCU grace period ends.
+	 */
+	if (uninterruptible)
+		wait_for_completion(&thread->exited);
+	else {
+		ret = wait_for_completion_interruptible(&thread->exited);
+		if (ret < 0) {
+			put_pid(pid);
+			return -EINTR;
+		}
+	}
+
+	/* Make sure the joinee has scheduled away ultimately. */
+	wait_for_rcu_grace_period(pid);
+
+	put_pid(pid);
+done:
+	ret = 0;
+	if (switched)
+		ret = xnthread_harden();
+
+	return ret;
+out:
+	xnlock_put_irqrestore(&nklock, s);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(xnthread_join);
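+
+/*
+ * A minimal termination sketch as typically found in cleanup code,
+ * pairing xnthread_cancel() with an uninterruptible xnthread_join();
+ * demo_thread is hypothetical:
+ *
+ *	xnthread_cancel(&demo_thread);
+ *	ret = xnthread_join(&demo_thread, true);
+ */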
+
+#ifdef CONFIG_SMP
+
+void xnthread_migrate_passive(struct xnthread *thread, struct xnsched *sched)
+{				/* nklocked, IRQs off */
+	if (thread->sched == sched)
+		return;
+
+	trace_cobalt_thread_migrate_passive(thread, xnsched_cpu(sched));
+	/*
+	 * Timer migration is postponed until the next timeout happens
+	 * for the periodic and rrb timers. The resource timer will be
+	 * moved to the right CPU next time it is armed in
+	 * xnthread_suspend().
+	 */
+	xnsched_migrate_passive(thread, sched);
+
+	xnstat_exectime_reset_stats(&thread->stat.lastperiod);
+}
+
+#endif	/* CONFIG_SMP */
+
+/**
+ * @fn int xnthread_set_schedparam(struct xnthread *thread,struct xnsched_class *sched_class,const union xnsched_policy_param *sched_param)
+ * @brief Change the base scheduling parameters of a thread.
+ *
+ * Changes the base scheduling policy and parameters of a thread. If
+ * the thread is currently blocked, waiting in priority-pending mode
+ * (XNSYNCH_PRIO) for a synchronization object to be signaled, Cobalt
+ * will attempt to reorder the object's wait queue so that it reflects
+ * the new sleeper's priority, unless the XNSYNCH_DREORD flag has been
+ * set for the pended object.
+ *
+ * @param thread The descriptor address of the affected thread. See
+ * note.
+ *
+ * @param sched_class The new scheduling class the thread should be
+ * assigned to.
+ *
+ * @param sched_param The scheduling parameters to set for the thread;
+ * @a sched_param must be valid within the context of @a sched_class.
+ *
+ * It is absolutely required to use this service to change a thread's
+ * priority, so that all the needed housekeeping chores are correctly
+ * performed, i.e. do *not* call xnsched_set_policy() directly, or
+ * worse, change the thread.cprio field by hand.
+ *
+ * @return 0 is returned on success. Otherwise, a negative error code
+ * indicates the cause of a failure that happened in the scheduling
+ * class implementation for @a sched_class. Invalid parameters passed
+ * into @a sched_param are common causes of error.
+ *
+ * @sideeffect
+ *
+ * - This service does not call the rescheduling procedure but may
+ * affect the state of the run queue for the previous and new
+ * scheduling classes.
+ *
+ * - Assigning the same scheduling class and parameters to a running
+ * or ready thread moves it to the end of the run queue, thus causing
+ * a manual round-robin, except if a priority boost is undergoing.
+ *
+ * @coretags{task-unrestricted}
+ *
+ * @note The changes only apply to the Xenomai scheduling parameters
+ * for @a thread. There is no propagation/translation of such changes
+ * to the Linux scheduler for the task mated to the Xenomai target
+ * thread.
+ */
+int xnthread_set_schedparam(struct xnthread *thread,
+			    struct xnsched_class *sched_class,
+			    const union xnsched_policy_param *sched_param)
+{
+	spl_t s;
+	int ret;
+
+	xnlock_get_irqsave(&nklock, s);
+	ret = __xnthread_set_schedparam(thread, sched_class, sched_param);
+	xnlock_put_irqrestore(&nklock, s);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(xnthread_set_schedparam);
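+
+/*
+ * A minimal sketch moving a thread to priority 80 within the RT
+ * class, then kicking the rescheduling procedure since this service
+ * does not call it; demo_thread is hypothetical:
+ *
+ *	union xnsched_policy_param param;
+ *	int ret;
+ *
+ *	param.rt.prio = 80;
+ *	ret = xnthread_set_schedparam(&demo_thread,
+ *				      &xnsched_class_rt, &param);
+ *	if (ret == 0)
+ *		xnsched_run();
+ */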
+
+int __xnthread_set_schedparam(struct xnthread *thread,
+			      struct xnsched_class *sched_class,
+			      const union xnsched_policy_param *sched_param)
+{
+	int old_wprio, new_wprio, ret;
+
+	old_wprio = thread->wprio;
+
+	ret = xnsched_set_policy(thread, sched_class, sched_param);
+	if (ret)
+		return ret;
+
+	new_wprio = thread->wprio;
+
+	/*
+	 * If the thread is waiting on a synchronization object,
+	 * update its position in the corresponding wait queue, unless
+	 * 1) reordering is explicitly disabled, or 2) the (weighted)
+	 * priority has not changed (to prevent spurious round-robin
+	 * effects).
+	 */
+	if (old_wprio != new_wprio && thread->wchan &&
+	    (thread->wchan->status & (XNSYNCH_DREORD|XNSYNCH_PRIO))
+	    == XNSYNCH_PRIO)
+		xnsynch_requeue_sleeper(thread);
+	/*
+	 * We should not move the thread to the end of its priority
+	 * group if any of these conditions is true:
+	 *
+	 * - thread is not runnable;
+	 * - thread bears the ready bit which means that xnsched_set_policy()
+	 * already reordered the run queue;
+	 * - thread currently holds the scheduler lock, so we don't want
+	 * any round-robin effect to take place;
+	 * - a priority boost is undergoing for this thread.
+	 */
+	if (!xnthread_test_state(thread, XNTHREAD_BLOCK_BITS|XNREADY|XNBOOST) &&
+	    thread->lock_count == 0)
+		xnsched_putback(thread);
+
+	xnthread_set_info(thread, XNSCHEDP);
+	/* Ask the target thread to call back if relaxed. */
+	if (xnthread_test_state(thread, XNRELAX))
+		__xnthread_signal(thread, SIGSHADOW, SIGSHADOW_ACTION_HOME);
+
+	return ret;
+}
+
+void __xnthread_test_cancel(struct xnthread *curr)
+{
+	/*
+	 * Just in case xnthread_test_cancel() is called from an IRQ
+	 * handler, in which case we may not take the exit path.
+	 *
+	 * NOTE: curr->sched is stable from our POV and can't change
+	 * under our feet.
+	 */
+	if (curr->sched->lflags & XNINIRQ)
+		return;
+
+	if (!xnthread_test_state(curr, XNRELAX))
+		xnthread_relax(0, 0);
+
+	do_exit(0);
+	/* ... won't return ... */
+	XENO_BUG(COBALT);
+}
+EXPORT_SYMBOL_GPL(__xnthread_test_cancel);
+
+/**
+ * @internal
+ * @fn int xnthread_harden(void);
+ * @brief Migrate a Linux task to the Xenomai domain.
+ *
+ * This service causes the transition of "current" from the Linux
+ * domain to Xenomai. The shadow will resume in the Xenomai domain as
+ * returning from schedule().
+ *
+ * @coretags{secondary-only, might-switch}
+ */
+int xnthread_harden(void)
+{
+	struct task_struct *p = current;
+	struct xnthread *thread;
+	int ret;
+
+	secondary_mode_only();
+
+	thread = xnthread_current();
+	if (thread == NULL)
+		return -EPERM;
+
+	if (signal_pending(p))
+		return -ERESTARTSYS;
+
+	trace_cobalt_shadow_gohard(thread);
+
+	xnthread_clear_sync_window(thread, XNRELAX);
+
+	ret = pipeline_leave_inband();
+	if (ret) {
+		xnthread_test_cancel();
+		xnthread_set_sync_window(thread, XNRELAX);
+		return ret;
+	}
+
+	/* "current" is now running on the out-of-band stage. */
+
+	xnlock_clear_irqon(&nklock);
+	xnthread_test_cancel();
+
+	trace_cobalt_shadow_hardened(thread);
+
+	/*
+	 * Recheck pending signals. As we block task
+	 * wakeups during the migration and handle_sigwake_event()
+	 * ignores signals until XNRELAX is cleared, any signal
+	 * between entering TASK_HARDENING and starting the migration
+	 * is just silently queued up to here.
+	 */
+	if (signal_pending(p)) {
+		xnthread_relax(!xnthread_test_state(thread, XNSSTEP),
+			       SIGDEBUG_MIGRATE_SIGNAL);
+		return -ERESTARTSYS;
+	}
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(xnthread_harden);
+
+struct lostage_wakeup {
+	struct pipeline_inband_work inband_work; /* Must be first. */
+	struct task_struct *task;
+};
+
+static void lostage_task_wakeup(struct pipeline_inband_work *inband_work)
+{
+	struct lostage_wakeup *rq;
+	struct task_struct *p;
+
+	rq = container_of(inband_work, struct lostage_wakeup, inband_work);
+	p = rq->task;
+
+	trace_cobalt_lostage_wakeup(p);
+
+	wake_up_process(p);
+}
+
+void __xnthread_propagate_schedparam(struct xnthread *curr)
+{
+	int kpolicy = SCHED_FIFO, kprio = curr->bprio, ret;
+	struct task_struct *p = current;
+	struct sched_param param;
+	spl_t s;
+
+	/*
+	 * Test-set race for XNSCHEDP is ok, the propagation is meant
+	 * to be done asap but not guaranteed to be carried out
+	 * immediately, and the request will remain pending until it
+	 * is eventually handled. We just have to protect against a
+	 * set-clear race.
+	 */
+	xnlock_get_irqsave(&nklock, s);
+	xnthread_clear_info(curr, XNSCHEDP);
+	xnlock_put_irqrestore(&nklock, s);
+
+	/*
+	 * Map our policies/priorities to the regular kernel's
+	 * (approximated).
+	 */
+	if (xnthread_test_state(curr, XNWEAK) && kprio == 0)
+		kpolicy = SCHED_NORMAL;
+	else if (kprio >= MAX_RT_PRIO)
+		kprio = MAX_RT_PRIO - 1;
+
+	if (p->policy != kpolicy || (kprio > 0 && p->rt_priority != kprio)) {
+		param.sched_priority = kprio;
+		ret = sched_setscheduler_nocheck(p, kpolicy, &param);
+		XENO_WARN_ON(COBALT, ret != 0);
+	}
+}
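+
+/*
+ * Mapping sketch for the code above (illustrative figures): a Cobalt
+ * thread with base priority 42 is propagated as SCHED_FIFO/42 to the
+ * regular kernel; a XNWEAK thread at priority 0 maps to
+ * SCHED_NORMAL; any Cobalt priority at or above MAX_RT_PRIO is
+ * clamped to MAX_RT_PRIO - 1.
+ */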
+
+/**
+ * @internal
+ * @fn void xnthread_relax(int notify, int reason);
+ * @brief Switch a shadow thread back to the Linux domain.
+ *
+ * This service yields the control of the running shadow back to
+ * Linux. This is obtained by suspending the shadow and scheduling a
+ * wake up call for the mated user task inside the Linux domain. The
+ * Linux task will resume on return from xnthread_suspend() on behalf
+ * of the root thread.
+ *
+ * @param notify A boolean flag indicating whether threads monitored
+ * for secondary mode switches should be sent a SIGDEBUG signal. For
+ * instance, some internal operations like task exit should not
+ * trigger such a signal.
+ *
+ * @param reason The reason to report along with the SIGDEBUG signal.
+ *
+ * @coretags{primary-only, might-switch}
+ *
+ * @note "current" is valid here since the shadow runs with the
+ * properties of the Linux task.
+ */
+void xnthread_relax(int notify, int reason)
+{
+	struct task_struct *p = current;
+	struct lostage_wakeup wakework = {
+		.inband_work = PIPELINE_INBAND_WORK_INITIALIZER(wakework,
+					lostage_task_wakeup),
+		.task = p,
+	};
+	struct xnthread *thread = xnthread_current();
+	int cpu __maybe_unused, suspension;
+	kernel_siginfo_t si;
+
+	primary_mode_only();
+
+	/*
+	 * Enqueue the request to move the running shadow from the Xenomai
+	 * domain to the Linux domain.  This will cause the Linux task
+	 * to resume using the register state of the shadow thread.
+	 */
+	trace_cobalt_shadow_gorelax(reason);
+
+	/*
+	 * If you intend to change the following interrupt-free
+	 * sequence, /first/ make sure to check the special handling
+	 * of XNRELAX in xnthread_suspend() when switching out the
+	 * current thread, not to break basic assumptions we make
+	 * there.
+	 *
+	 * We disable interrupts during the migration sequence, but
+	 * xnthread_suspend() has an interrupts-on section built in.
+	 */
+	trace_cobalt_lostage_request("wakeup", p);
+	pipeline_post_inband_work(&wakework);
+	/*
+	 * Grab the nklock to synchronize the Linux task state
+	 * manipulation with handle_sigwake_event. This lock will be
+	 * dropped by xnthread_suspend().
+	 */
+	splmax();
+	xnlock_get(&nklock);
+	xnthread_run_handler_stack(thread, relax_thread);
+	suspension = pipeline_leave_oob_prepare();
+	xnthread_suspend(thread, suspension, XN_INFINITE, XN_RELATIVE, NULL);
+	splnone();
+
+	/*
+	 * Basic sanity check after an expected transition to secondary
+	 * mode.
+	 */
+	XENO_WARN(COBALT, is_primary_domain(),
+		  "xnthread_relax() failed for thread %s[%d]",
+		  thread->name, xnthread_host_pid(thread));
+
+	pipeline_leave_oob_finish();
+
+	/* Account for secondary mode switch. */
+	xnstat_counter_inc(&thread->stat.ssw);
+
+	/*
+	 * When relaxing, we check whether new Cobalt schedparams,
+	 * possibly set for us while we were running in primary mode,
+	 * should be propagated to the regular kernel.
+	 *
+	 * CAUTION: This obviously won't update the schedparams cached
+	 * by the glibc for the caller in user-space, but this is the
+	 * deal: we don't relax threads which issue
+	 * pthread_setschedparam[_ex]() from primary mode, but then
+	 * only the kernel side (Cobalt and the host kernel) will be
+	 * aware of the change, and glibc might cache obsolete
+	 * information.
+	 */
+	xnthread_propagate_schedparam(thread);
+
+	if (xnthread_test_state(thread, XNUSER) && notify) {
+		if (xnthread_test_state(thread, XNWARN)) {
+			/* Help debugging spurious relaxes. */
+			xndebug_notify_relax(thread, reason);
+			memset(&si, 0, sizeof(si));
+			si.si_signo = SIGDEBUG;
+			si.si_code = SI_QUEUE;
+			si.si_int = reason | sigdebug_marker;
+			send_sig_info(SIGDEBUG, &si, p);
+		}
+		xnsynch_detect_boosted_relax(thread);
+	}
+
+	/*
+	 * "current" is now running into the Linux domain on behalf of
+	 * the root thread.
+	 */
+	xnthread_sync_window(thread);
+
+#ifdef CONFIG_SMP
+	if (xnthread_test_localinfo(thread, XNMOVED)) {
+		xnthread_clear_localinfo(thread, XNMOVED);
+		cpu = xnsched_cpu(thread->sched);
+		set_cpus_allowed_ptr(p, cpumask_of(cpu));
+	}
+#endif
+	/*
+	 * After migration there will be no syscall restart (rather a signal
+	 * delivery).
+	 */
+	xnthread_clear_localinfo(thread, XNSYSRST);
+
+	pipeline_clear_mayday();
+
+	trace_cobalt_shadow_relaxed(thread);
+}
+EXPORT_SYMBOL_GPL(xnthread_relax);
+
+static void lostage_task_signal(struct pipeline_inband_work *inband_work)
+{
+	struct lostage_signal *rq;
+	struct task_struct *p;
+	kernel_siginfo_t si;
+	int signo, sigval;
+	spl_t s;
+
+	rq = container_of(inband_work, struct lostage_signal, inband_work);
+	/*
+	 * Revisit: I-pipe requirement. It passes a copy of the original work
+	 * struct, so retrieve the original one first in order to update it.
+	 */
+	rq = rq->self;
+
+	xnlock_get_irqsave(&nklock, s);
+
+	p = rq->task;
+	signo = rq->signo;
+	sigval = rq->sigval;
+	rq->task = NULL;
+
+	xnlock_put_irqrestore(&nklock, s);
+
+	trace_cobalt_lostage_signal(p, signo);
+
+	if (signo == SIGSHADOW || signo == SIGDEBUG) {
+		memset(&si, '\0', sizeof(si));
+		si.si_signo = signo;
+		si.si_code = SI_QUEUE;
+		si.si_int = sigval;
+		send_sig_info(signo, &si, p);
+	} else {
+		send_sig(signo, p, 1);
+	}
+}
+
+static int force_wakeup(struct xnthread *thread) /* nklock locked, irqs off */
+{
+	int ret = 0;
+
+	if (xnthread_test_info(thread, XNKICKED))
+		return 1;
+
+	if (xnthread_unblock(thread)) {
+		xnthread_set_info(thread, XNKICKED);
+		ret = 1;
+	}
+
+	/*
+	 * CAUTION: we must NOT raise XNBREAK when clearing a forcible
+	 * block state, such as XNSUSP, XNHELD. The caller of
+	 * xnthread_suspend() we unblock shall proceed as for a normal
+	 * return, until it traverses a cancellation point if
+	 * XNCANCELD was raised earlier, or calls xnthread_suspend()
+	 * which will detect XNKICKED and act accordingly.
+	 *
+	 * Rationale: callers of xnthread_suspend() may assume that
+	 * receiving XNBREAK means that the process that motivated the
+	 * blocking did not go to completion. E.g. the wait context
+	 * (see xnthread_prepare_wait()) was NOT posted before
+	 * xnsynch_sleep_on() returned, leaving no useful data there.
+	 * Therefore, in case only XNSUSP remains set for the thread
+	 * on entry to force_wakeup(), after XNPEND was lifted earlier
+	 * when the wait went to successful completion (i.e. no
+	 * timeout), then we want the kicked thread to know that it
+	 * did receive the requested resource, not finding XNBREAK in
+	 * its state word.
+	 *
+	 * Callers of xnthread_suspend() may inquire for XNKICKED to
+	 * detect forcible unblocks from XNSUSP, XNHELD, if they
+	 * should act upon this case specifically.
+	 */
+	if (xnthread_test_state(thread, XNSUSP|XNHELD)) {
+		xnthread_resume(thread, XNSUSP|XNHELD);
+		xnthread_set_info(thread, XNKICKED);
+	}
+
+	/*
+	 * Tricky cases:
+	 *
+	 * - a thread which was ready on entry wasn't actually
+	 * running, but nevertheless waits for the CPU in primary
+	 * mode, so we have to make sure that it will be notified of
+	 * the pending break condition as soon as it enters
+	 * xnthread_suspend() from a blocking Xenomai syscall.
+	 *
+	 * - a ready/readied thread on exit may be prevented from
+	 * running by the scheduling policy module it belongs
+	 * to. Typically, policies enforcing a runtime budget do not
+	 * block threads with no budget, but rather keep them out of
+	 * their run queue, so that ->sched_pick() won't elect
+	 * them. We tell the policy handler about the fact that we do
+	 * want such thread to run until it relaxes, whatever this
+	 * means internally for the implementation.
+	 */
+	if (xnthread_test_state(thread, XNREADY))
+		xnsched_kick(thread);
+
+	return ret;
+}
+
+void __xnthread_kick(struct xnthread *thread) /* nklock locked, irqs off */
+{
+	struct task_struct *p = xnthread_host_task(thread);
+
+	/* Thread is already relaxed -- nop. */
+	if (xnthread_test_state(thread, XNRELAX))
+		return;
+
+	/*
+	 * First, try to kick the thread out of any blocking syscall
+	 * Xenomai-wise. If that succeeds, then the thread will relax
+	 * on its return path to user-space.
+	 */
+	if (force_wakeup(thread))
+		return;
+
+	/*
+	 * If that did not work out because the thread was not blocked
+	 * (i.e. XNPEND/XNDELAY) in a syscall, then force a mayday
+	 * trap. Note that we don't want to send that thread any linux
+	 * signal, we only want to force it to switch to secondary
+	 * mode asap.
+	 *
+	 * It could happen that a thread is relaxed on a syscall
+	 * return path after it was resumed from self-suspension
+	 * (e.g. XNSUSP) then also forced to run a mayday trap right
+	 * after: this is still correct, at worst we would get a
+	 * useless mayday syscall leading to a no-op, no big deal.
+	 */
+	xnthread_set_info(thread, XNKICKED);
+
+	/*
+	 * We may send mayday signals to userland threads only.
+	 * However, no need to run a mayday trap if the current thread
+	 * kicks itself out of primary mode: it will relax on its way
+	 * back to userland via the current syscall
+	 * epilogue. Otherwise, we want that thread to enter the
+	 * mayday trap asap, to call us back for relaxing.
+	 */
+	if (thread != xnsched_current_thread() &&
+	    xnthread_test_state(thread, XNUSER))
+		pipeline_raise_mayday(p);
+}
+
+void xnthread_kick(struct xnthread *thread)
+{
+	spl_t s;
+
+	xnlock_get_irqsave(&nklock, s);
+	__xnthread_kick(thread);
+	xnlock_put_irqrestore(&nklock, s);
+}
+EXPORT_SYMBOL_GPL(xnthread_kick);
+
+void __xnthread_demote(struct xnthread *thread) /* nklock locked, irqs off */
+{
+	struct xnsched_class *sched_class;
+	union xnsched_policy_param param;
+
+	/*
+	 * First we kick the thread out of primary mode, and have it
+	 * resume execution immediately over the regular linux
+	 * context.
+	 */
+	__xnthread_kick(thread);
+
+	/*
+	 * Then we demote it, turning that thread into a non real-time
+	 * Xenomai shadow, which still has access to Xenomai
+	 * resources, but won't compete for real-time scheduling
+	 * anymore. In effect, moving the thread to a weak scheduling
+	 * class/priority will prevent it from sticking back to
+	 * primary mode.
+	 */
+#ifdef CONFIG_XENO_OPT_SCHED_WEAK
+	param.weak.prio = 0;
+	sched_class = &xnsched_class_weak;
+#else
+	param.rt.prio = 0;
+	sched_class = &xnsched_class_rt;
+#endif
+	__xnthread_set_schedparam(thread, sched_class, &param);
+}
+
+void xnthread_demote(struct xnthread *thread)
+{
+	spl_t s;
+
+	xnlock_get_irqsave(&nklock, s);
+	__xnthread_demote(thread);
+	xnlock_put_irqrestore(&nklock, s);
+}
+EXPORT_SYMBOL_GPL(xnthread_demote);
+
+static int get_slot_index_from_sig(int sig, int arg)
+{
+	int action;
+
+	switch (sig) {
+	case SIGDEBUG:
+		return XNTHREAD_SIGDEBUG;
+	case SIGSHADOW:
+		action = sigshadow_action(arg);
+		switch (action) {
+		case SIGSHADOW_ACTION_HARDEN:
+			return XNTHREAD_SIGSHADOW_HARDEN;
+		case SIGSHADOW_ACTION_BACKTRACE:
+			return XNTHREAD_SIGSHADOW_BACKTRACE;
+		case SIGSHADOW_ACTION_HOME:
+			return XNTHREAD_SIGSHADOW_HOME;
+		}
+		break;
+	case SIGTERM:
+		return XNTHREAD_SIGTERM;
+	}
+
+	return -1;
+}
+
+/* nklock locked, irqs off */
+void __xnthread_signal(struct xnthread *thread, int sig, int arg)
+{
+	struct lostage_signal *sigwork;
+	int slot;
+
+	if (XENO_WARN_ON(COBALT, !xnthread_test_state(thread, XNUSER)))
+		return;
+
+	slot = get_slot_index_from_sig(sig, arg);
+	if (WARN_ON_ONCE(slot < 0))
+		return;
+
+	sigwork = &thread->sigarray[slot];
+	if (sigwork->task)
+		return;
+
+	sigwork->inband_work = (struct pipeline_inband_work)
+			PIPELINE_INBAND_WORK_INITIALIZER(*sigwork,
+							 lostage_task_signal);
+	sigwork->task = xnthread_host_task(thread);
+	sigwork->signo = sig;
+	sigwork->sigval = sig == SIGDEBUG ? arg | sigdebug_marker : arg;
+	sigwork->self = sigwork; /* Revisit: I-pipe requirement */
+
+	trace_cobalt_lostage_request("signal", sigwork->task);
+
+	pipeline_post_inband_work(sigwork);
+}
+
+void xnthread_signal(struct xnthread *thread, int sig, int arg)
+{
+	spl_t s;
+
+	xnlock_get_irqsave(&nklock, s);
+	__xnthread_signal(thread, sig, arg);
+	xnlock_put_irqrestore(&nklock, s);
+}
+EXPORT_SYMBOL_GPL(xnthread_signal);
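+
+/*
+ * Usage sketch (hypothetical caller): queueing a SIGDEBUG
+ * notification for a user-space shadow, to be delivered from the
+ * inband stage:
+ *
+ *	xnthread_signal(thread, SIGDEBUG, SIGDEBUG_MIGRATE_SIGNAL);
+ *
+ * Only XNUSER threads may be signaled this way; for SIGDEBUG, the
+ * reason code is or'ed with sigdebug_marker before delivery.
+ */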
+
+void xnthread_pin_initial(struct xnthread *thread)
+{
+	struct task_struct *p = current;
+	struct xnsched *sched;
+	int cpu;
+	spl_t s;
+
+	/*
+	 * @thread is the Xenomai extension of the current kernel
+	 * task. If the current CPU is part of the affinity mask of
+	 * this thread, pin the latter on this CPU. Otherwise pin it
+	 * to the first CPU of that mask.
+	 */
+	cpu = task_cpu(p);
+	if (!cpumask_test_cpu(cpu, &thread->affinity))
+		cpu = cpumask_first(&thread->affinity);
+
+	set_cpus_allowed_ptr(p, cpumask_of(cpu));
+	/*
+	 * @thread is still unstarted Xenomai-wise, we are precisely
+	 * in the process of mapping the current kernel task to
+	 * it. Therefore xnthread_migrate_passive() is the right way
+	 * to pin it on a real-time CPU.
+	 */
+	xnlock_get_irqsave(&nklock, s);
+	sched = xnsched_struct(cpu);
+	xnthread_migrate_passive(thread, sched);
+	xnlock_put_irqrestore(&nklock, s);
+}
+
+/* nklock locked, irqs off */
+void xnthread_call_mayday(struct xnthread *thread, int reason)
+{
+	struct task_struct *p = xnthread_host_task(thread);
+
+	/* Mayday traps are available to userland threads only. */
+	XENO_BUG_ON(COBALT, !xnthread_test_state(thread, XNUSER));
+	xnthread_set_info(thread, XNKICKED);
+	__xnthread_signal(thread, SIGDEBUG, reason);
+	pipeline_raise_mayday(p);
+}
+EXPORT_SYMBOL_GPL(xnthread_call_mayday);
+
+int xnthread_killall(int grace, int mask)
+{
+	struct xnthread *t, *curr = xnthread_current();
+	int nrkilled = 0, nrthreads, count;
+	long ret;
+	spl_t s;
+
+	secondary_mode_only();
+
+	/*
+	 * We may hold the core lock across calls to xnthread_cancel()
+	 * provided that we won't self-cancel.
+	 */
+	xnlock_get_irqsave(&nklock, s);
+
+	nrthreads = cobalt_nrthreads;
+
+	xnsched_for_each_thread(t) {
+		if (xnthread_test_state(t, XNROOT) ||
+		    xnthread_test_state(t, mask) != mask ||
+		    t == curr)
+			continue;
+
+		if (XENO_DEBUG(COBALT))
+			printk(XENO_INFO "terminating %s[%d]\n",
+			       t->name, xnthread_host_pid(t));
+		nrkilled++;
+		xnthread_cancel(t);
+	}
+
+	xnlock_put_irqrestore(&nklock, s);
+
+	/*
+	 * Cancel then join all existing threads during the grace
+	 * period. It is the caller's responsibility to prevent more
+	 * threads from binding to the system if required; we make no
+	 * provision for this here.
+	 */
+	count = nrthreads - nrkilled;
+	if (XENO_DEBUG(COBALT))
+		printk(XENO_INFO "waiting for %d threads to exit\n",
+		       nrkilled);
+
+	if (grace > 0) {
+		ret = wait_event_interruptible_timeout(join_all,
+						       cobalt_nrthreads == count,
+						       grace * HZ);
+		if (ret == 0)
+			return -EAGAIN;
+	} else
+		ret = wait_event_interruptible(join_all,
+					       cobalt_nrthreads == count);
+
+	/* Wait for a full RCU grace period to expire. */
+	wait_for_rcu_grace_period(NULL);
+
+	if (XENO_DEBUG(COBALT))
+		printk(XENO_INFO "joined %d threads\n",
+		       count + nrkilled - cobalt_nrthreads);
+
+	return ret < 0 ? -EINTR : 0;
+}
+EXPORT_SYMBOL_GPL(xnthread_killall);
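+
+/*
+ * Usage sketch (hypothetical caller, secondary mode): a call to
+ * xnthread_killall(5, XNUSER) cancels every user-space shadow but
+ * the caller, then waits up to five seconds for them to exit,
+ * returning -EAGAIN if the grace period elapses first, or -EINTR if
+ * the wait is interrupted.
+ */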
+
+/* Xenomai's generic personality. */
+struct xnthread_personality xenomai_personality = {
+	.name = "core",
+	.magic = -1
+};
+EXPORT_SYMBOL_GPL(xenomai_personality);
+
+/** @} */
+++ linux-patched/kernel/xenomai/Makefile	2022-03-21 12:58:28.829894422 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/kernel/xenomai/arith.c	1970-01-01 01:00:00.000000000 +0100
+obj-$(CONFIG_XENOMAI) += pipeline/ xenomai.o rtdm/ posix/
+
+xenomai-y :=	arith.o 	\
+		bufd.o		\
+		clock.o		\
+		heap.o		\
+		init.o		\
+		lock.o		\
+		registry.o	\
+		sched-idle.o	\
+		sched-rt.o	\
+		sched.o		\
+		select.o	\
+		synch.o		\
+		thread.o	\
+		time.o		\
+		timer.o		\
+		tree.o
+
+xenomai-$(CONFIG_XENO_OPT_SCHED_QUOTA) += sched-quota.o
+xenomai-$(CONFIG_XENO_OPT_SCHED_WEAK) += sched-weak.o
+xenomai-$(CONFIG_XENO_OPT_SCHED_SPORADIC) += sched-sporadic.o
+xenomai-$(CONFIG_XENO_OPT_SCHED_TP) += sched-tp.o
+xenomai-$(CONFIG_XENO_OPT_DEBUG) += debug.o
+xenomai-$(CONFIG_XENO_OPT_PIPE) += pipe.o
+xenomai-$(CONFIG_XENO_OPT_MAP) += map.o
+xenomai-$(CONFIG_PROC_FS) += vfile.o procfs.o
+++ linux-patched/kernel/xenomai/arith.c	2022-03-21 12:58:28.825894461 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/kernel/xenomai/tree.c	1970-01-01 01:00:00.000000000 +0100
+/*
+ * Copyright &copy; 2005 Gilles Chanteperdrix.
+ *
+ * Xenomai is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * Xenomai is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Xenomai; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA
+ * 02111-1307, USA.
+ */
+#include <linux/module.h>
+
+/**
+ * @ingroup cobalt_core
+ * @defgroup cobalt_core_arith In-kernel arithmetics
+ *
+ * A collection of helpers performing arithmetics not implicitly
+ * available from kernel context via GCC helpers. Many of these
+ * routines enable 64bit arithmetics on 32bit systems. Xenomai
+ * architecture ports normally implement the performance critical ones
+ * in hand-crafted assembly code (see
+ * kernel/cobalt/arch/\<arch\>/include/asm/xenomai/uapi/arith.h).
+ * @{
+ */
+
+/**
+ * Architecture-independent div64 operation with remainder.
+ *
+ * @param a dividend
+ *
+ * @param b divisor
+ *
+ * @param rem if non-NULL, a pointer to a 64bit variable for
+ * collecting the remainder from the division.
+ */
+unsigned long long xnarch_generic_full_divmod64(unsigned long long a,
+						unsigned long long b,
+						unsigned long long *rem)
+{
+	unsigned long long q = 0, r = a;
+	int i;
+
+	for (i = fls(a >> 32) - fls(b >> 32), b <<= i; i >= 0; i--, b >>= 1) {
+		q <<= 1;
+		if (b <= r) {
+			r -= b;
+			q++;
+		}
+	}
+
+	if (rem)
+		*rem = r;
+	return q;
+}
+EXPORT_SYMBOL_GPL(xnarch_generic_full_divmod64);
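+
+/*
+ * Usage sketch (illustrative figures): splitting a nanosecond count
+ * into seconds and a sub-second remainder on a 32bit build:
+ *
+ *	unsigned long long rem;
+ *	unsigned long long sec =
+ *		xnarch_generic_full_divmod64(2700000000ULL, 1000000000ULL, &rem);
+ *	// sec == 2, rem == 700000000
+ */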
+
+/** @} */
+++ linux-patched/kernel/xenomai/tree.c	2022-03-21 12:58:28.820894510 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/kernel/xenomai/sched-quota.c	1970-01-01 01:00:00.000000000 +0100
+/*
+ * Copyright (C) 2014 Gilles Chanteperdrix <gilles.chanteperdrix@xenomai.org>.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.	 See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+#include <cobalt/kernel/tree.h>
+
+void xntree_cleanup(struct rb_root *t, void *cookie,
+		void (*destroy)(void *cookie, struct xnid *id))
+{
+	struct rb_node *node, *next;
+
+	node = rb_first(t);
+	while (node) {
+		next = rb_next(node);
+
+		/* destroy is expected to remove the node from the rbtree */
+		destroy(cookie, container_of(node, struct xnid, link));
+
+		node = next;
+	}
+}
+
+int xnid_enter(struct rb_root *t, struct xnid *xnid, xnkey_t key)
+{
+	struct rb_node **new = &t->rb_node, *parent = NULL;
+
+	while (*new) {
+		struct xnid *i = container_of(*new, struct xnid, link);
+
+		parent = *new;
+		if (key < i->key)
+			new = &((*new)->rb_left);
+		else if (key > i->key)
+			new = &((*new)->rb_right);
+		else
+			return -EEXIST;
+	}
+
+	xnid->key = key;
+	rb_link_node(&xnid->link, parent, new);
+	rb_insert_color(&xnid->link, t);
+
+	return 0;
+}
+++ linux-patched/kernel/xenomai/sched-quota.c	2022-03-21 12:58:28.816894549 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/kernel/xenomai/sched-rt.c	1970-01-01 01:00:00.000000000 +0100
+/*
+ * Copyright (C) 2013 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * Xenomai is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published
+ * by the Free Software Foundation; either version 2 of the License,
+ * or (at your option) any later version.
+ *
+ * Xenomai is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Xenomai; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA
+ * 02111-1307, USA.
+ */
+#include <linux/bitmap.h>
+#include <cobalt/kernel/sched.h>
+#include <cobalt/kernel/arith.h>
+#include <cobalt/uapi/sched.h>
+#include <trace/events/cobalt-core.h>
+
+/*
+ * With this policy, each per-CPU scheduler slot maintains a list of
+ * active thread groups, picking from the sched_rt runqueue.
+ *
+ * Each time a thread is picked from the runqueue, we check whether we
+ * still have budget for running it, looking at the group it belongs
+ * to. If so, a timer (i.e. xnsched_quota->limit_timer) is armed to
+ * elapse when that group would run out of budget, should the
+ * incoming thread run unpreempted until then.
+ *
+ * Otherwise, if no budget remains in the group for running the
+ * candidate thread, we move the latter to a local expiry queue
+ * maintained by the group. This process is done on the fly as we pull
+ * from the runqueue.
+ *
+ * Updating the remaining budget is done each time the Cobalt core
+ * asks for replacing the current thread with the next runnable one,
+ * i.e. xnsched_quota_pick(). There we charge the elapsed run time of
+ * the outgoing thread to the relevant group, and conversely, we check
+ * whether the incoming thread has budget.
+ *
+ * Finally, a per-CPU timer (xnsched_quota->refill_timer) periodically
+ * ticks in the background, in accordance with the defined quota
+ * interval. Its handler replenishes each thread group's budget in
+ * accordance with that group's share, pushing all expired threads
+ * back to the run queue in the same move.
+ *
+ * NOTE: since the core logic enforcing the budget happens entirely in
+ * xnsched_quota_pick(), applying a budget change can be done as
+ * simply as forcing the rescheduling procedure to be invoked asap. As
+ * a result of this, the Cobalt core will ask for the next thread to
+ * run, which means calling xnsched_quota_pick() eventually.
+ *
+ * CAUTION: xnsched_quota_group->nr_active does count both the threads
+ * from that group linked to the sched_rt runqueue, _and_ the threads
+ * moved to the local expiry queue. As a matter of fact, the expired
+ * threads - those for which we consumed all the per-group budget -
+ * are still seen as runnable (i.e. not blocked/suspended) by the
+ * Cobalt core. This only means that the SCHED_QUOTA policy won't pick
+ * them until the corresponding budget is replenished.
+ */
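+
+/*
+ * Worked example (illustrative figures): with
+ * CONFIG_XENO_OPT_SCHED_QUOTA_PERIOD=10000 (i.e. a 10 ms quota
+ * interval) and a 30% group quota, quota_ns amounts to 3 ms.
+ * Whenever a member of that group is picked, limit_timer is armed at
+ * now + run_budget_ns, so the group cannot overrun its remaining
+ * budget within the current interval; every 10 ms, refill_timer
+ * replenishes the budget and moves the threads parked on the expiry
+ * queue back to the run queue.
+ */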
+static DECLARE_BITMAP(group_map, CONFIG_XENO_OPT_SCHED_QUOTA_NR_GROUPS);
+
+static inline int group_is_active(struct xnsched_quota_group *tg)
+{
+	struct xnthread *curr = tg->sched->curr;
+
+	if (tg->nr_active)
+		return 1;
+
+	/*
+	 * Check whether the current thread belongs to the group, and
+	 * is still in running state (XNREADY denotes a thread linked
+	 * to the runqueue, in which case tg->nr_active already
+	 * accounts for it).
+	 */
+	if (curr->quota == tg &&
+	    xnthread_test_state(curr, XNREADY|XNTHREAD_BLOCK_BITS) == 0)
+		return 1;
+
+	return 0;
+}
+
+static inline void replenish_budget(struct xnsched_quota *qs,
+				    struct xnsched_quota_group *tg)
+{
+	xnticks_t budget_ns, credit_ns;
+
+	if (tg->quota_ns == tg->quota_peak_ns) {
+		/*
+		 * Fast path: we don't accumulate runtime credit.
+		 * This includes groups with no runtime limit
+		 * (i.e. quota off: quota >= period && quota == peak).
+		 */
+		tg->run_budget_ns = tg->quota_ns;
+		return;
+	}
+
+	/*
+	 * We have to deal with runtime credit accumulation, as the
+	 * group may consume more than its base quota during a single
+	 * interval, though only up to a peak duration (so as not to
+	 * monopolize the CPU).
+	 *
+	 * - In the simplest case, a group is allotted a new full
+	 * budget plus the unconsumed portion of the previous budget,
+	 * provided the sum does not exceed the peak quota.
+	 *
+	 * - When there is too much budget for a single interval
+	 * (i.e. above peak quota), we spread the extra time over
+	 * multiple intervals through a credit accumulation mechanism.
+	 *
+	 * - The accumulated credit is dropped whenever a group has no
+	 * runnable threads.
+	 */
+	if (!group_is_active(tg)) {
+		/* Drop accumulated credit. */
+		tg->run_credit_ns = 0;
+		tg->run_budget_ns = tg->quota_ns;
+		return;
+	}
+
+	budget_ns = tg->run_budget_ns + tg->quota_ns;
+	if (budget_ns > tg->quota_peak_ns) {
+		/* Too much budget, spread it over intervals. */
+		tg->run_credit_ns += budget_ns - tg->quota_peak_ns;
+		tg->run_budget_ns = tg->quota_peak_ns;
+	} else if (tg->run_credit_ns) {
+		credit_ns = tg->quota_peak_ns - budget_ns;
+		/* Consume the accumulated credit. */
+		if (tg->run_credit_ns >= credit_ns)
+			tg->run_credit_ns -= credit_ns;
+		else {
+			credit_ns = tg->run_credit_ns;
+			tg->run_credit_ns = 0;
+		}
+		/* Allot extended budget, limited to peak quota. */
+		tg->run_budget_ns = budget_ns + credit_ns;
+	} else
+		/* No credit, budget was below peak quota. */
+		tg->run_budget_ns = budget_ns;
+}
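+
+/*
+ * Worked example of the credit logic above (illustrative figures,
+ * assuming the group stays active throughout): with quota_ns = 2 ms
+ * and quota_peak_ns = 5 ms, a group still holding 4 ms of unconsumed
+ * budget is granted 4 + 2 = 6 ms, which exceeds the peak:
+ * run_budget_ns is clamped to 5 ms and the extra 1 ms is accumulated
+ * as run_credit_ns. If the whole 5 ms is then consumed during the
+ * next interval, the following refill allots 0 + 2 = 2 ms, to which
+ * the 1 ms credit is added back, for a 3 ms budget.
+ */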
+
+static void quota_refill_handler(struct xntimer *timer)
+{
+	struct xnsched_quota_group *tg;
+	struct xnthread *thread, *tmp;
+	struct xnsched_quota *qs;
+	struct xnsched *sched;
+
+	qs = container_of(timer, struct xnsched_quota, refill_timer);
+	XENO_BUG_ON(COBALT, list_empty(&qs->groups));
+	sched = container_of(qs, struct xnsched, quota);
+
+	trace_cobalt_schedquota_refill(0);
+
+	list_for_each_entry(tg, &qs->groups, next) {
+		/* Allot a new runtime budget for the group. */
+		replenish_budget(qs, tg);
+
+		if (tg->run_budget_ns == 0 || list_empty(&tg->expired))
+			continue;
+		/*
+		 * For each group living on this CPU, move all expired
+		 * threads back to the runqueue. Since those threads
+		 * were moved out of the runqueue as we were
+		 * considering them for execution, we push them back
+		 * in LIFO order to their respective priority group.
+		 * The expiry queue is FIFO to keep ordering right
+		 * among expired threads.
+		 */
+		list_for_each_entry_safe_reverse(thread, tmp, &tg->expired, quota_expired) {
+			list_del_init(&thread->quota_expired);
+			xnsched_addq(&sched->rt.runnable, thread);
+		}
+	}
+
+	xnsched_set_self_resched(timer->sched);
+}
+
+static void quota_limit_handler(struct xntimer *timer)
+{
+	struct xnsched *sched;
+
+	sched = container_of(timer, struct xnsched, quota.limit_timer);
+	/*
+	 * Force a rescheduling on the return path of the current
+	 * interrupt, so that the budget is re-evaluated for the
+	 * current group in xnsched_quota_pick().
+	 */
+	xnsched_set_self_resched(sched);
+}
+
+static int quota_sum_all(struct xnsched_quota *qs)
+{
+	struct xnsched_quota_group *tg;
+	int sum;
+
+	if (list_empty(&qs->groups))
+		return 0;
+
+	sum = 0;
+	list_for_each_entry(tg, &qs->groups, next)
+		sum += tg->quota_percent;
+
+	return sum;
+}
+
+static void xnsched_quota_init(struct xnsched *sched)
+{
+	char limiter_name[XNOBJECT_NAME_LEN], refiller_name[XNOBJECT_NAME_LEN];
+	struct xnsched_quota *qs = &sched->quota;
+
+	qs->period_ns = CONFIG_XENO_OPT_SCHED_QUOTA_PERIOD * 1000ULL;
+	INIT_LIST_HEAD(&qs->groups);
+
+#ifdef CONFIG_SMP
+	ksformat(refiller_name, sizeof(refiller_name),
+		 "[quota-refill/%u]", sched->cpu);
+	ksformat(limiter_name, sizeof(limiter_name),
+		 "[quota-limit/%u]", sched->cpu);
+#else
+	strcpy(refiller_name, "[quota-refill]");
+	strcpy(limiter_name, "[quota-limit]");
+#endif
+	xntimer_init(&qs->refill_timer,
+		     &nkclock, quota_refill_handler, sched,
+		     XNTIMER_IGRAVITY);
+	xntimer_set_name(&qs->refill_timer, refiller_name);
+
+	xntimer_init(&qs->limit_timer,
+		     &nkclock, quota_limit_handler, sched,
+		     XNTIMER_IGRAVITY);
+	xntimer_set_name(&qs->limit_timer, limiter_name);
+}
+
+static bool xnsched_quota_setparam(struct xnthread *thread,
+				   const union xnsched_policy_param *p)
+{
+	struct xnsched_quota_group *tg;
+	struct xnsched_quota *qs;
+	bool effective;
+
+	xnthread_clear_state(thread, XNWEAK);
+	effective = xnsched_set_effective_priority(thread, p->quota.prio);
+
+	qs = &thread->sched->quota;
+	list_for_each_entry(tg, &qs->groups, next) {
+		if (tg->tgid != p->quota.tgid)
+			continue;
+		if (thread->quota) {
+			/* Dequeued earlier by our caller. */
+			list_del(&thread->quota_next);
+			thread->quota->nr_threads--;
+		}
+
+		trace_cobalt_schedquota_add_thread(tg, thread);
+
+		thread->quota = tg;
+		list_add(&thread->quota_next, &tg->members);
+		tg->nr_threads++;
+		return effective;
+	}
+
+	XENO_BUG(COBALT);
+
+	return false;
+}
+
+static void xnsched_quota_getparam(struct xnthread *thread,
+				   union xnsched_policy_param *p)
+{
+	p->quota.prio = thread->cprio;
+	p->quota.tgid = thread->quota->tgid;
+}
+
+static void xnsched_quota_trackprio(struct xnthread *thread,
+				    const union xnsched_policy_param *p)
+{
+	if (p) {
+		/* We should not cross groups during PI boost. */
+		XENO_WARN_ON(COBALT,
+			     thread->base_class == &xnsched_class_quota &&
+			     thread->quota->tgid != p->quota.tgid);
+		thread->cprio = p->quota.prio;
+	} else
+		thread->cprio = thread->bprio;
+}
+
+static void xnsched_quota_protectprio(struct xnthread *thread, int prio)
+{
+	if (prio > XNSCHED_QUOTA_MAX_PRIO)
+		prio = XNSCHED_QUOTA_MAX_PRIO;
+
+	thread->cprio = prio;
+}
+
+static int xnsched_quota_chkparam(struct xnthread *thread,
+				  const union xnsched_policy_param *p)
+{
+	struct xnsched_quota_group *tg;
+	struct xnsched_quota *qs;
+	int tgid;
+
+	if (p->quota.prio < XNSCHED_QUOTA_MIN_PRIO ||
+	    p->quota.prio > XNSCHED_QUOTA_MAX_PRIO)
+		return -EINVAL;
+
+	tgid = p->quota.tgid;
+	if (tgid < 0 || tgid >= CONFIG_XENO_OPT_SCHED_QUOTA_NR_GROUPS)
+		return -EINVAL;
+
+	/*
+	 * The group must be managed on the same CPU the thread
+	 * currently runs on.
+	 */
+	qs = &thread->sched->quota;
+	list_for_each_entry(tg, &qs->groups, next) {
+		if (tg->tgid == tgid)
+			return 0;
+	}
+
+	/*
+	 * If that group exists nevertheless, we give userland a
+	 * specific error code.
+	 */
+	if (test_bit(tgid, group_map))
+		return -EPERM;
+
+	return -EINVAL;
+}
+
+static void xnsched_quota_forget(struct xnthread *thread)
+{
+	trace_cobalt_schedquota_remove_thread(thread->quota, thread);
+
+	thread->quota->nr_threads--;
+	XENO_BUG_ON(COBALT, thread->quota->nr_threads < 0);
+	list_del(&thread->quota_next);
+	thread->quota = NULL;
+}
+
+static void xnsched_quota_kick(struct xnthread *thread)
+{
+	struct xnsched_quota_group *tg = thread->quota;
+	struct xnsched *sched = thread->sched;
+
+	/*
+	 * Allow a kicked thread to be elected for running until it
+	 * relaxes, even if the group it belongs to lacks runtime
+	 * budget.
+	 */
+	if (tg->run_budget_ns == 0 && !list_empty(&thread->quota_expired)) {
+		list_del_init(&thread->quota_expired);
+		xnsched_addq_tail(&sched->rt.runnable, thread);
+	}
+}
+
+static inline int thread_is_runnable(struct xnthread *thread)
+{
+	return thread->quota->run_budget_ns > 0 ||
+		xnthread_test_info(thread, XNKICKED);
+}
+
+static void xnsched_quota_enqueue(struct xnthread *thread)
+{
+	struct xnsched_quota_group *tg = thread->quota;
+	struct xnsched *sched = thread->sched;
+
+	if (!thread_is_runnable(thread))
+		list_add_tail(&thread->quota_expired, &tg->expired);
+	else
+		xnsched_addq_tail(&sched->rt.runnable, thread);
+
+	tg->nr_active++;
+}
+
+static void xnsched_quota_dequeue(struct xnthread *thread)
+{
+	struct xnsched_quota_group *tg = thread->quota;
+	struct xnsched *sched = thread->sched;
+
+	if (!list_empty(&thread->quota_expired))
+		list_del_init(&thread->quota_expired);
+	else
+		xnsched_delq(&sched->rt.runnable, thread);
+
+	tg->nr_active--;
+}
+
+static void xnsched_quota_requeue(struct xnthread *thread)
+{
+	struct xnsched_quota_group *tg = thread->quota;
+	struct xnsched *sched = thread->sched;
+
+	if (!thread_is_runnable(thread))
+		list_add(&thread->quota_expired, &tg->expired);
+	else
+		xnsched_addq(&sched->rt.runnable, thread);
+
+	tg->nr_active++;
+}
+
+static struct xnthread *xnsched_quota_pick(struct xnsched *sched)
+{
+	struct xnthread *next, *curr = sched->curr;
+	struct xnsched_quota *qs = &sched->quota;
+	struct xnsched_quota_group *otg, *tg;
+	xnticks_t now, elapsed;
+	int ret;
+
+	now = xnclock_read_monotonic(&nkclock);
+	otg = curr->quota;
+	if (otg == NULL)
+		goto pick;
+	/*
+	 * Charge the time consumed by the outgoing thread to the
+	 * group it belongs to.
+	 */
+	elapsed = now - otg->run_start_ns;
+	if (elapsed < otg->run_budget_ns)
+		otg->run_budget_ns -= elapsed;
+	else
+		otg->run_budget_ns = 0;
+pick:
+	next = xnsched_getq(&sched->rt.runnable);
+	if (next == NULL) {
+		xntimer_stop(&qs->limit_timer);
+		return NULL;
+	}
+
+	/*
+	 * As we basically piggyback on the SCHED_FIFO runqueue, make
+	 * sure to detect non-quota threads.
+	 */
+	tg = next->quota;
+	if (tg == NULL)
+		return next;
+
+	tg->run_start_ns = now;
+
+	/*
+	 * Don't consider the budget if the thread was kicked: we have
+	 * to allow it to run until it eventually relaxes.
+	 */
+	if (xnthread_test_info(next, XNKICKED)) {
+		xntimer_stop(&qs->limit_timer);
+		goto out;
+	}
+
+	if (tg->run_budget_ns == 0) {
+		/* Flush expired group members as we go. */
+		list_add_tail(&next->quota_expired, &tg->expired);
+		goto pick;
+	}
+
+	if (otg == tg && xntimer_running_p(&qs->limit_timer))
+		/* Same group, leave the running timer untouched. */
+		goto out;
+
+	/* Arm limit timer for the new running group. */
+	ret = xntimer_start(&qs->limit_timer, now + tg->run_budget_ns,
+			    XN_INFINITE, XN_ABSOLUTE);
+	if (ret) {
+		/* Budget exhausted: deactivate this group. */
+		tg->run_budget_ns = 0;
+		list_add_tail(&next->quota_expired, &tg->expired);
+		goto pick;
+	}
+out:
+	tg->nr_active--;
+
+	return next;
+}
+
+static void xnsched_quota_migrate(struct xnthread *thread, struct xnsched *sched)
+{
+	union xnsched_policy_param param;
+	/*
+	 * Runtime quota groups are defined per-CPU, so leaving the
+	 * current CPU means exiting the group. We do this by moving
+	 * the target thread to the plain RT class.
+	 */
+	param.rt.prio = thread->cprio;
+	__xnthread_set_schedparam(thread, &xnsched_class_rt, &param);
+}
+
+/**
+ * @ingroup cobalt_core_sched
+ * @defgroup sched_quota SCHED_QUOTA scheduling policy
+ *
+ * The SCHED_QUOTA policy enforces a limitation on the CPU consumption
+ * of threads over a globally defined period, known as the quota
+ * interval. This is done by pooling threads with common requirements
+ * in groups, and giving each group a share of the global period
+ * (CONFIG_XENO_OPT_SCHED_QUOTA_PERIOD).
+ *
+ * When threads have entirely consumed the quota allotted to the group
+ * they belong to, the latter is suspended as a whole, until the next
+ * quota interval starts. At this point, a new runtime budget is
+ * given to each group, in accordance with its share.
+ *
+ *@{
+ */
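+
+/*
+ * Usage sketch (hypothetical caller; these services run under
+ * atomic_only(), so the caller must provide the proper
+ * serialization): creating a group on CPU @cpu, with @tg pointing at
+ * caller-provided storage, then capping it to 20% of the quota
+ * interval with a 50% peak:
+ *
+ *	int quota_sum, ret;
+ *
+ *	ret = xnsched_quota_create_group(tg, xnsched_struct(cpu), &quota_sum);
+ *	if (ret == 0)
+ *		xnsched_quota_set_limit(tg, 20, 50, &quota_sum);
+ */
+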
+int xnsched_quota_create_group(struct xnsched_quota_group *tg,
+			       struct xnsched *sched,
+			       int *quota_sum_r)
+{
+	int tgid, nr_groups = CONFIG_XENO_OPT_SCHED_QUOTA_NR_GROUPS;
+	struct xnsched_quota *qs = &sched->quota;
+
+	atomic_only();
+
+	tgid = find_first_zero_bit(group_map, nr_groups);
+	if (tgid >= nr_groups)
+		return -ENOSPC;
+
+	__set_bit(tgid, group_map);
+	tg->tgid = tgid;
+	tg->sched = sched;
+	tg->run_budget_ns = qs->period_ns;
+	tg->run_credit_ns = 0;
+	tg->quota_percent = 100;
+	tg->quota_peak_percent = 100;
+	tg->quota_ns = qs->period_ns;
+	tg->quota_peak_ns = qs->period_ns;
+	tg->nr_active = 0;
+	tg->nr_threads = 0;
+	INIT_LIST_HEAD(&tg->members);
+	INIT_LIST_HEAD(&tg->expired);
+
+	trace_cobalt_schedquota_create_group(tg);
+
+	if (list_empty(&qs->groups))
+		xntimer_start(&qs->refill_timer,
+			      qs->period_ns, qs->period_ns, XN_RELATIVE);
+
+	list_add(&tg->next, &qs->groups);
+	*quota_sum_r = quota_sum_all(qs);
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(xnsched_quota_create_group);
+
+int xnsched_quota_destroy_group(struct xnsched_quota_group *tg,
+				int force, int *quota_sum_r)
+{
+	struct xnsched_quota *qs = &tg->sched->quota;
+	union xnsched_policy_param param;
+	struct xnthread *thread, *tmp;
+
+	atomic_only();
+
+	if (!list_empty(&tg->members)) {
+		if (!force)
+			return -EBUSY;
+		/* Move group members to the rt class. */
+		list_for_each_entry_safe(thread, tmp, &tg->members, quota_next) {
+			param.rt.prio = thread->cprio;
+			__xnthread_set_schedparam(thread, &xnsched_class_rt, &param);
+		}
+	}
+
+	trace_cobalt_schedquota_destroy_group(tg);
+
+	list_del(&tg->next);
+	__clear_bit(tg->tgid, group_map);
+
+	if (list_empty(&qs->groups))
+		xntimer_stop(&qs->refill_timer);
+
+	if (quota_sum_r)
+		*quota_sum_r = quota_sum_all(qs);
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(xnsched_quota_destroy_group);
+
+void xnsched_quota_set_limit(struct xnsched_quota_group *tg,
+			     int quota_percent, int quota_peak_percent,
+			     int *quota_sum_r)
+{
+	struct xnsched *sched = tg->sched;
+	struct xnsched_quota *qs = &sched->quota;
+	xnticks_t old_quota_ns = tg->quota_ns;
+	struct xnthread *thread, *tmp, *curr;
+	xnticks_t now, elapsed, consumed;
+
+	atomic_only();
+
+	trace_cobalt_schedquota_set_limit(tg, quota_percent,
+					  quota_peak_percent);
+
+	if (quota_percent < 0 || quota_percent > 100) { /* Quota off. */
+		quota_percent = 100;
+		tg->quota_ns = qs->period_ns;
+	} else
+		tg->quota_ns = xnarch_div64(qs->period_ns * quota_percent, 100);
+
+	if (quota_peak_percent < quota_percent)
+		quota_peak_percent = quota_percent;
+
+	if (quota_peak_percent < 0 || quota_peak_percent > 100) {
+		quota_peak_percent = 100;
+		tg->quota_peak_ns = qs->period_ns;
+	} else
+		tg->quota_peak_ns = xnarch_div64(qs->period_ns * quota_peak_percent, 100);
+
+	tg->quota_percent = quota_percent;
+	tg->quota_peak_percent = quota_peak_percent;
+
+	curr = sched->curr;
+	if (curr->quota == tg &&
+	    xnthread_test_state(curr, XNREADY|XNTHREAD_BLOCK_BITS) == 0) {
+		now = xnclock_read_monotonic(&nkclock);
+
+		elapsed = now - tg->run_start_ns;
+		if (elapsed < tg->run_budget_ns)
+			tg->run_budget_ns -= elapsed;
+		else
+			tg->run_budget_ns = 0;
+
+		tg->run_start_ns = now;
+
+		xntimer_stop(&qs->limit_timer);
+	}
+
+	if (tg->run_budget_ns <= old_quota_ns)
+		consumed = old_quota_ns - tg->run_budget_ns;
+	else
+		consumed = 0;
+	if (tg->quota_ns >= consumed)
+		tg->run_budget_ns = tg->quota_ns - consumed;
+	else
+		tg->run_budget_ns = 0;
+
+	tg->run_credit_ns = 0;	/* Drop accumulated credit. */
+
+	*quota_sum_r = quota_sum_all(qs);
+
+	if (tg->run_budget_ns > 0) {
+		list_for_each_entry_safe_reverse(thread, tmp, &tg->expired,
+						 quota_expired) {
+			list_del_init(&thread->quota_expired);
+			xnsched_addq(&sched->rt.runnable, thread);
+		}
+	}
+
+	/*
+	 * Apply the new budget immediately, in case a member of this
+	 * group is currently running.
+	 */
+	xnsched_set_resched(sched);
+	xnsched_run();
+}
+EXPORT_SYMBOL_GPL(xnsched_quota_set_limit);
+
+struct xnsched_quota_group *
+xnsched_quota_find_group(struct xnsched *sched, int tgid)
+{
+	struct xnsched_quota_group *tg;
+
+	atomic_only();
+
+	if (list_empty(&sched->quota.groups))
+		return NULL;
+
+	list_for_each_entry(tg, &sched->quota.groups, next) {
+		if (tg->tgid == tgid)
+			return tg;
+	}
+
+	return NULL;
+}
+EXPORT_SYMBOL_GPL(xnsched_quota_find_group);
+
+int xnsched_quota_sum_all(struct xnsched *sched)
+{
+	struct xnsched_quota *qs = &sched->quota;
+
+	atomic_only();
+
+	return quota_sum_all(qs);
+}
+EXPORT_SYMBOL_GPL(xnsched_quota_sum_all);
+
+/** @} */
+
+#ifdef CONFIG_XENO_OPT_VFILE
+
+struct xnvfile_directory sched_quota_vfroot;
+
+struct vfile_sched_quota_priv {
+	struct xnthread *curr;
+};
+
+struct vfile_sched_quota_data {
+	int cpu;
+	pid_t pid;
+	int prio;
+	int tgid;
+	xnticks_t budget;
+	char name[XNOBJECT_NAME_LEN];
+};
+
+static struct xnvfile_snapshot_ops vfile_sched_quota_ops;
+
+static struct xnvfile_snapshot vfile_sched_quota = {
+	.privsz = sizeof(struct vfile_sched_quota_priv),
+	.datasz = sizeof(struct vfile_sched_quota_data),
+	.tag = &nkthreadlist_tag,
+	.ops = &vfile_sched_quota_ops,
+};
+
+static int vfile_sched_quota_rewind(struct xnvfile_snapshot_iterator *it)
+{
+	struct vfile_sched_quota_priv *priv = xnvfile_iterator_priv(it);
+	int nrthreads = xnsched_class_quota.nthreads;
+
+	if (nrthreads == 0)
+		return -ESRCH;
+
+	priv->curr = list_first_entry(&nkthreadq, struct xnthread, glink);
+
+	return nrthreads;
+}
+
+static int vfile_sched_quota_next(struct xnvfile_snapshot_iterator *it,
+				  void *data)
+{
+	struct vfile_sched_quota_priv *priv = xnvfile_iterator_priv(it);
+	struct vfile_sched_quota_data *p = data;
+	struct xnthread *thread;
+
+	if (priv->curr == NULL)
+		return 0;	/* All done. */
+
+	thread = priv->curr;
+	if (list_is_last(&thread->glink, &nkthreadq))
+		priv->curr = NULL;
+	else
+		priv->curr = list_next_entry(thread, glink);
+
+	if (thread->base_class != &xnsched_class_quota)
+		return VFILE_SEQ_SKIP;
+
+	p->cpu = xnsched_cpu(thread->sched);
+	p->pid = xnthread_host_pid(thread);
+	memcpy(p->name, thread->name, sizeof(p->name));
+	p->tgid = thread->quota->tgid;
+	p->prio = thread->cprio;
+	p->budget = thread->quota->run_budget_ns;
+
+	return 1;
+}
+
+static int vfile_sched_quota_show(struct xnvfile_snapshot_iterator *it,
+				  void *data)
+{
+	struct vfile_sched_quota_data *p = data;
+	char buf[16];
+
+	if (p == NULL)
+		xnvfile_printf(it, "%-3s  %-6s %-4s %-4s %-10s %s\n",
+			       "CPU", "PID", "TGID", "PRI", "BUDGET", "NAME");
+	else {
+		xntimer_format_time(p->budget, buf, sizeof(buf));
+		xnvfile_printf(it, "%3u  %-6d %-4d %-4d %-10s %s\n",
+			       p->cpu,
+			       p->pid,
+			       p->tgid,
+			       p->prio,
+			       buf,
+			       p->name);
+	}
+
+	return 0;
+}
+
+static struct xnvfile_snapshot_ops vfile_sched_quota_ops = {
+	.rewind = vfile_sched_quota_rewind,
+	.next = vfile_sched_quota_next,
+	.show = vfile_sched_quota_show,
+};
+
+static int xnsched_quota_init_vfile(struct xnsched_class *schedclass,
+				    struct xnvfile_directory *vfroot)
+{
+	int ret;
+
+	ret = xnvfile_init_dir(schedclass->name, &sched_quota_vfroot, vfroot);
+	if (ret)
+		return ret;
+
+	return xnvfile_init_snapshot("threads", &vfile_sched_quota,
+				     &sched_quota_vfroot);
+}
+
+static void xnsched_quota_cleanup_vfile(struct xnsched_class *schedclass)
+{
+	xnvfile_destroy_snapshot(&vfile_sched_quota);
+	xnvfile_destroy_dir(&sched_quota_vfroot);
+}
+
+#endif /* CONFIG_XENO_OPT_VFILE */
+
+struct xnsched_class xnsched_class_quota = {
+	.sched_init		=	xnsched_quota_init,
+	.sched_enqueue		=	xnsched_quota_enqueue,
+	.sched_dequeue		=	xnsched_quota_dequeue,
+	.sched_requeue		=	xnsched_quota_requeue,
+	.sched_pick		=	xnsched_quota_pick,
+	.sched_tick		=	NULL,
+	.sched_rotate		=	NULL,
+	.sched_migrate		=	xnsched_quota_migrate,
+	.sched_chkparam		=	xnsched_quota_chkparam,
+	.sched_setparam		=	xnsched_quota_setparam,
+	.sched_getparam		=	xnsched_quota_getparam,
+	.sched_trackprio	=	xnsched_quota_trackprio,
+	.sched_protectprio	=	xnsched_quota_protectprio,
+	.sched_forget		=	xnsched_quota_forget,
+	.sched_kick		=	xnsched_quota_kick,
+#ifdef CONFIG_XENO_OPT_VFILE
+	.sched_init_vfile	=	xnsched_quota_init_vfile,
+	.sched_cleanup_vfile	=	xnsched_quota_cleanup_vfile,
+#endif
+	.weight			=	XNSCHED_CLASS_WEIGHT(3),
+	.policy			=	SCHED_QUOTA,
+	.name			=	"quota"
+};
+EXPORT_SYMBOL_GPL(xnsched_class_quota);
+++ linux-patched/kernel/xenomai/sched-rt.c	2022-03-21 12:58:28.810894607 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/kernel/xenomai/time.c	1970-01-01 01:00:00.000000000 +0100
+/*
+ * Copyright (C) 2008 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * Xenomai is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published
+ * by the Free Software Foundation; either version 2 of the License,
+ * or (at your option) any later version.
+ *
+ * Xenomai is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Xenomai; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA
+ * 02111-1307, USA.
+ */
+#include <cobalt/kernel/sched.h>
+
+static void xnsched_rt_init(struct xnsched *sched)
+{
+	xnsched_initq(&sched->rt.runnable);
+}
+
+static void xnsched_rt_requeue(struct xnthread *thread)
+{
+	/*
+	 * Put back at same place: i.e. requeue to head of current
+	 * priority group (i.e. LIFO, used for preemption handling).
+	 */
+	__xnsched_rt_requeue(thread);
+}
+
+static void xnsched_rt_enqueue(struct xnthread *thread)
+{
+	/*
+	 * Enqueue for next pick: i.e. move to end of current priority
+	 * group (i.e. FIFO).
+	 */
+	__xnsched_rt_enqueue(thread);
+}
+
+static void xnsched_rt_dequeue(struct xnthread *thread)
+{
+	/*
+	 * Pull from the runnable thread queue.
+	 */
+	__xnsched_rt_dequeue(thread);
+}
+
+static void xnsched_rt_rotate(struct xnsched *sched,
+			      const union xnsched_policy_param *p)
+{
+	struct xnthread *thread, *curr;
+
+	if (xnsched_emptyq_p(&sched->rt.runnable))
+		return;	/* No runnable thread in this class. */
+
+	curr = sched->curr;
+
+	if (p->rt.prio == XNSCHED_RUNPRIO)
+		thread = curr;
+	else {
+		thread = xnsched_findq(&sched->rt.runnable, p->rt.prio);
+		if (thread == NULL)
+			return;
+	}
+
+	/*
+	 * In case we picked the current thread, we have to make sure
+	 * not to move it back to the run queue if it was blocked
+	 * before we were called. The same goes if the current thread
+	 * holds the scheduler lock.
+	 */
+	if (thread != curr ||
+	    (!xnthread_test_state(curr, XNTHREAD_BLOCK_BITS) &&
+	     curr->lock_count == 0))
+		xnsched_putback(thread);
+}
+
+void xnsched_rt_tick(struct xnsched *sched)
+{
+	/*
+	 * The round-robin time credit is only consumed by a running
+	 * thread that neither holds the scheduler lock nor was
+	 * blocked before entering this callback. As the time slice is
+	 * exhausted for the running thread, move it back to the
+	 * run queue at the end of its priority group.
+	 */
+	xnsched_putback(sched->curr);
+}
+
+static bool xnsched_rt_setparam(struct xnthread *thread,
+				const union xnsched_policy_param *p)
+{
+	return __xnsched_rt_setparam(thread, p);
+}
+
+static void xnsched_rt_getparam(struct xnthread *thread,
+				union xnsched_policy_param *p)
+{
+	__xnsched_rt_getparam(thread, p);
+}
+
+static void xnsched_rt_trackprio(struct xnthread *thread,
+				 const union xnsched_policy_param *p)
+{
+	__xnsched_rt_trackprio(thread, p);
+}
+
+static void xnsched_rt_protectprio(struct xnthread *thread, int prio)
+{
+	__xnsched_rt_protectprio(thread, prio);
+}
+
+#ifdef CONFIG_XENO_OPT_VFILE
+
+struct xnvfile_directory sched_rt_vfroot;
+
+struct vfile_sched_rt_priv {
+	struct xnthread *curr;
+};
+
+struct vfile_sched_rt_data {
+	int cpu;
+	pid_t pid;
+	char name[XNOBJECT_NAME_LEN];
+	xnticks_t period;
+	int cprio;
+};
+
+static struct xnvfile_snapshot_ops vfile_sched_rt_ops;
+
+static struct xnvfile_snapshot vfile_sched_rt = {
+	.privsz = sizeof(struct vfile_sched_rt_priv),
+	.datasz = sizeof(struct vfile_sched_rt_data),
+	.tag = &nkthreadlist_tag,
+	.ops = &vfile_sched_rt_ops,
+};
+
+static int vfile_sched_rt_rewind(struct xnvfile_snapshot_iterator *it)
+{
+	struct vfile_sched_rt_priv *priv = xnvfile_iterator_priv(it);
+	int nrthreads = xnsched_class_rt.nthreads;
+
+	if (nrthreads == 0)
+		return -ESRCH;
+
+	priv->curr = list_first_entry(&nkthreadq, struct xnthread, glink);
+
+	return nrthreads;
+}
+
+static int vfile_sched_rt_next(struct xnvfile_snapshot_iterator *it,
+			       void *data)
+{
+	struct vfile_sched_rt_priv *priv = xnvfile_iterator_priv(it);
+	struct vfile_sched_rt_data *p = data;
+	struct xnthread *thread;
+
+	if (priv->curr == NULL)
+		return 0;	/* All done. */
+
+	thread = priv->curr;
+	if (list_is_last(&thread->glink, &nkthreadq))
+		priv->curr = NULL;
+	else
+		priv->curr = list_next_entry(thread, glink);
+
+	if (thread->base_class != &xnsched_class_rt ||
+	    xnthread_test_state(thread, XNWEAK))
+		return VFILE_SEQ_SKIP;
+
+	p->cpu = xnsched_cpu(thread->sched);
+	p->pid = xnthread_host_pid(thread);
+	memcpy(p->name, thread->name, sizeof(p->name));
+	p->cprio = thread->cprio;
+	p->period = xnthread_get_period(thread);
+
+	return 1;
+}
+
+static int vfile_sched_rt_show(struct xnvfile_snapshot_iterator *it,
+			       void *data)
+{
+	struct vfile_sched_rt_data *p = data;
+	char pribuf[16], ptbuf[16];
+
+	if (p == NULL)
+		xnvfile_printf(it, "%-3s  %-6s %-8s %-10s %s\n",
+			       "CPU", "PID", "PRI", "PERIOD", "NAME");
+	else {
+		ksformat(pribuf, sizeof(pribuf), "%3d", p->cprio);
+		xntimer_format_time(p->period, ptbuf, sizeof(ptbuf));
+		xnvfile_printf(it, "%3u  %-6d %-8s %-10s %s\n",
+			       p->cpu,
+			       p->pid,
+			       pribuf,
+			       ptbuf,
+			       p->name);
+	}
+
+	return 0;
+}
+
+static struct xnvfile_snapshot_ops vfile_sched_rt_ops = {
+	.rewind = vfile_sched_rt_rewind,
+	.next = vfile_sched_rt_next,
+	.show = vfile_sched_rt_show,
+};
+
+static int xnsched_rt_init_vfile(struct xnsched_class *schedclass,
+				 struct xnvfile_directory *vfroot)
+{
+	int ret;
+
+	ret = xnvfile_init_dir(schedclass->name, &sched_rt_vfroot, vfroot);
+	if (ret)
+		return ret;
+
+	return xnvfile_init_snapshot("threads", &vfile_sched_rt,
+				     &sched_rt_vfroot);
+}
+
+static void xnsched_rt_cleanup_vfile(struct xnsched_class *schedclass)
+{
+	xnvfile_destroy_snapshot(&vfile_sched_rt);
+	xnvfile_destroy_dir(&sched_rt_vfroot);
+}
+
+#endif /* CONFIG_XENO_OPT_VFILE */
+
+struct xnsched_class xnsched_class_rt = {
+	.sched_init		=	xnsched_rt_init,
+	.sched_enqueue		=	xnsched_rt_enqueue,
+	.sched_dequeue		=	xnsched_rt_dequeue,
+	.sched_requeue		=	xnsched_rt_requeue,
+	.sched_pick		=	xnsched_rt_pick,
+	.sched_tick		=	xnsched_rt_tick,
+	.sched_rotate		=	xnsched_rt_rotate,
+	.sched_forget		=	NULL,
+	.sched_kick		=	NULL,
+	.sched_declare		=	NULL,
+	.sched_setparam		=	xnsched_rt_setparam,
+	.sched_trackprio	=	xnsched_rt_trackprio,
+	.sched_protectprio	=	xnsched_rt_protectprio,
+	.sched_getparam		=	xnsched_rt_getparam,
+#ifdef CONFIG_XENO_OPT_VFILE
+	.sched_init_vfile	=	xnsched_rt_init_vfile,
+	.sched_cleanup_vfile	=	xnsched_rt_cleanup_vfile,
+#endif
+	.weight			=	XNSCHED_CLASS_WEIGHT(4),
+	.policy			=	SCHED_FIFO,
+	.name			=	"rt"
+};
+EXPORT_SYMBOL_GPL(xnsched_class_rt);
+++ linux-patched/kernel/xenomai/time.c	2022-03-21 12:58:28.803894676 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/kernel/xenomai/clock.c	1970-01-01 01:00:00.000000000 +0100
+// SPDX-License-Identifier: GPL-2.0
+
+#include <asm-generic/xenomai/syscall.h>
+#include <cobalt/kernel/time.h>
+#include <linux/compat.h>
+
+int cobalt_get_timespec64(struct timespec64 *ts,
+			  const struct __kernel_timespec __user *uts)
+{
+	struct __kernel_timespec kts;
+	int ret;
+
+	ret = cobalt_copy_from_user(&kts, uts, sizeof(kts));
+	if (ret)
+		return -EFAULT;
+
+	ts->tv_sec = kts.tv_sec;
+
+	/* Zero out the padding in compat mode */
+	if (in_compat_syscall())
+		kts.tv_nsec &= 0xFFFFFFFFUL;
+
+	/* In 32-bit mode, this drops the padding */
+	ts->tv_nsec = kts.tv_nsec;
+
+	return 0;
+}
+
+int cobalt_put_timespec64(const struct timespec64 *ts,
+		   struct __kernel_timespec __user *uts)
+{
+	struct __kernel_timespec kts = {
+		.tv_sec = ts->tv_sec,
+		.tv_nsec = ts->tv_nsec
+	};
+
+	return cobalt_copy_to_user(uts, &kts, sizeof(kts)) ? -EFAULT : 0;
+}
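+
+/*
+ * Usage sketch (hypothetical syscall handler, with u_ts a user-space
+ * struct __kernel_timespec pointer): fetching a caller-supplied
+ * timeout value:
+ *
+ *	struct timespec64 ts;
+ *	int ret = cobalt_get_timespec64(&ts, u_ts);
+ *	if (ret)
+ *		return ret;	// -EFAULT on bad user memory
+ */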
+++ linux-patched/kernel/xenomai/clock.c	2022-03-21 12:58:28.792894783 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/kernel/xenomai/registry.c	1970-01-01 01:00:00.000000000 +0100
+/*
+ * Copyright (C) 2006-2011 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * Xenomai is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * Xenomai is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Xenomai; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA
+ * 02111-1307, USA.
+ */
+#include <linux/percpu.h>
+#include <linux/errno.h>
+#include <cobalt/kernel/sched.h>
+#include <cobalt/kernel/timer.h>
+#include <cobalt/kernel/clock.h>
+#include <cobalt/kernel/arith.h>
+#include <cobalt/kernel/vdso.h>
+#include <cobalt/uapi/time.h>
+#include <asm/xenomai/calibration.h>
+#include <trace/events/cobalt-core.h>
+/**
+ * @ingroup cobalt_core
+ * @defgroup cobalt_core_clock Clock services
+ *
+ * @{
+ */
+
+#ifdef XNARCH_HAVE_NODIV_LLIMD
+
+static struct xnarch_u32frac bln_frac;
+
+unsigned long long xnclock_divrem_billion(unsigned long long value,
+					  unsigned long *rem)
+{
+	unsigned long long q;
+	unsigned r;
+
+	q = xnarch_nodiv_ullimd(value, bln_frac.frac, bln_frac.integ);
+	r = value - q * 1000000000;
+	if (r >= 1000000000) {
+		++q;
+		r -= 1000000000;
+	}
+	*rem = r;
+	return q;
+}
+
+#else
+
+unsigned long long xnclock_divrem_billion(unsigned long long value,
+					  unsigned long *rem)
+{
+	return xnarch_ulldiv(value, 1000000000, rem);
+
+}
+
+#endif /* !XNARCH_HAVE_NODIV_LLIMD */
+
+EXPORT_SYMBOL_GPL(xnclock_divrem_billion);
+
+DEFINE_PRIVATE_XNLOCK(ratelimit_lock);
+
+int __xnclock_ratelimit(struct xnclock_ratelimit_state *rs, const char *func)
+{
+	spl_t s;
+	int ret;
+
+	if (!rs->interval)
+		return 1;
+
+	xnlock_get_irqsave(&ratelimit_lock, s);
+
+	if (!rs->begin)
+		rs->begin = xnclock_read_realtime(&nkclock);
+	if (xnclock_read_realtime(&nkclock) >= rs->begin + rs->interval) {
+		if (rs->missed)
+			printk(KERN_WARNING "%s: %d callbacks suppressed\n",
+			       func, rs->missed);
+		rs->begin   = 0;
+		rs->printed = 0;
+		rs->missed  = 0;
+	}
+	if (rs->burst && rs->burst > rs->printed) {
+		rs->printed++;
+		ret = 1;
+	} else {
+		rs->missed++;
+		ret = 0;
+	}
+	xnlock_put_irqrestore(&ratelimit_lock, s);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(__xnclock_ratelimit);
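+
+/*
+ * Usage sketch (illustrative): a caller would keep a rate-limit state
+ * in static storage and gate noisy diagnostics with it; the window and
+ * burst values below are arbitrary (nanoseconds and message count):
+ *
+ *	static struct xnclock_ratelimit_state rs = {
+ *		.interval = 5000000000ULL,
+ *		.burst = 10,
+ *	};
+ *
+ *	if (__xnclock_ratelimit(&rs, __func__))
+ *		printk(KERN_WARNING "noisy condition detected\n");
+ */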
+
+void xnclock_core_local_shot(struct xnsched *sched)
+{
+	struct xntimerdata *tmd;
+	struct xntimer *timer;
+	xnsticks_t delay;
+	xntimerh_t *h;
+
+	/*
+	 * Do not reprogram locally when inside the tick handler -
+	 * will be done on exit anyway. Also exit if there is no
+	 * pending timer.
+	 */
+	if (sched->status & XNINTCK)
+		return;
+
+	/*
+	 * Assume the core clock device always has percpu semantics in
+	 * SMP.
+	 */
+	tmd = xnclock_this_timerdata(&nkclock);
+	h = xntimerq_head(&tmd->q);
+	if (h == NULL) {
+		sched->lflags |= XNIDLE;
+		return;
+	}
+
+	/*
+	 * Here we try to defer the host tick heading the timer queue,
+	 * so that it does not preempt a real-time activity uselessly,
+	 * in two cases:
+	 *
+	 * 1) a rescheduling is pending for the current CPU. We may
+	 * assume that a real-time thread is about to resume, so we
+	 * want to move the host tick out of the way until the host
+	 * kernel resumes, unless there are no other outstanding
+	 * timers.
+	 *
+	 * 2) the current thread is running in primary mode, in which
+	 * case we may also defer the host tick until the host kernel
+	 * resumes.
+	 *
+	 * The host tick deferral is cleared whenever Xenomai is about
+	 * to yield control to the host kernel (see ___xnsched_run()),
+	 * or a timer with an earlier timeout date is scheduled,
+	 * whichever comes first.
+	 */
+	sched->lflags &= ~(XNHDEFER|XNIDLE|XNTSTOP);
+	timer = container_of(h, struct xntimer, aplink);
+	if (unlikely(timer == &sched->htimer)) {
+		if (xnsched_resched_p(sched) ||
+		    !xnthread_test_state(sched->curr, XNROOT)) {
+			h = xntimerq_second(&tmd->q, h);
+			if (h) {
+				sched->lflags |= XNHDEFER;
+				timer = container_of(h, struct xntimer, aplink);
+			}
+		}
+	}
+
+	delay = xntimerh_date(&timer->aplink) - xnclock_core_read_raw();
+	if (delay < 0)
+		delay = 0;
+	else if (delay > ULONG_MAX)
+		delay = ULONG_MAX;
+
+	xntrace_tick((unsigned)delay);
+
+	pipeline_set_timer_shot(delay);
+}
+
+#ifdef CONFIG_SMP
+void xnclock_core_remote_shot(struct xnsched *sched)
+{
+	pipeline_send_timer_ipi(cpumask_of(xnsched_cpu(sched)));
+}
+#endif
+
+static void adjust_timer(struct xntimer *timer, xntimerq_t *q,
+			 xnsticks_t delta)
+{
+	struct xnclock *clock = xntimer_clock(timer);
+	xnticks_t period, div;
+	xnsticks_t diff;
+
+	xntimerh_date(&timer->aplink) -= delta;
+
+	if (xntimer_periodic_p(timer) == 0)
+		goto enqueue;
+
+	timer->start_date -= delta;
+	period = xntimer_interval(timer);
+	diff = xnclock_ticks_to_ns(clock,
+		xnclock_read_raw(clock) - xntimer_expiry(timer));
+
+	if ((xnsticks_t)(diff - period) >= 0) {
+		/*
+		 * The timer should have ticked several times by now.
+		 * Instead of calling timer->handler repeatedly, we
+		 * change the timer date without changing its pexpect,
+		 * so that the timer ticks only once and the lost
+		 * ticks are counted as overruns.
+		 */
+		div = xnarch_div64(diff, period);
+		timer->periodic_ticks += div;
+		xntimer_update_date(timer);
+	} else if (delta < 0
+		   && (timer->status & XNTIMER_FIRED)
+		   && (xnsticks_t) (diff + period) <= 0) {
+		/*
+		 * The timer is periodic and NOT waiting for its first
+		 * shot, so we make it tick earlier than its original
+		 * date. Otherwise, after adjusting the clock to an
+		 * earlier date, real-time periodic timers would not
+		 * tick until the original date had passed.
+		 */
+		div = xnarch_div64(-diff, period);
+		timer->periodic_ticks -= div;
+		timer->pexpect_ticks -= div;
+		xntimer_update_date(timer);
+	}
+
+enqueue:
+	xntimer_enqueue(timer, q);
+}
+
+void xnclock_apply_offset(struct xnclock *clock, xnsticks_t delta_ns)
+{
+	struct xntimer *timer, *tmp;
+	struct list_head adjq;
+	struct xnsched *sched;
+	xnsticks_t delta;
+	xntimerq_it_t it;
+	unsigned int cpu;
+	xntimerh_t *h;
+	xntimerq_t *q;
+
+	atomic_only();
+
+	/*
+	 * The (real-time) epoch just changed for the clock. Since
+	 * timeout dates of timers are expressed as monotonic ticks
+	 * internally, we need to apply the new offset to the
+	 * monotonic clock to all outstanding timers based on the
+	 * affected clock.
+	 */
+	INIT_LIST_HEAD(&adjq);
+	delta = xnclock_ns_to_ticks(clock, delta_ns);
+
+	for_each_online_cpu(cpu) {
+		sched = xnsched_struct(cpu);
+		q = &xnclock_percpu_timerdata(clock, cpu)->q;
+
+		for (h = xntimerq_it_begin(q, &it); h;
+		     h = xntimerq_it_next(q, &it, h)) {
+			timer = container_of(h, struct xntimer, aplink);
+			if (timer->status & XNTIMER_REALTIME)
+				list_add_tail(&timer->adjlink, &adjq);
+		}
+
+		if (list_empty(&adjq))
+			continue;
+
+		list_for_each_entry_safe(timer, tmp, &adjq, adjlink) {
+			list_del(&timer->adjlink);
+			xntimer_dequeue(timer, q);
+			adjust_timer(timer, q, delta);
+		}
+
+		if (sched != xnsched_current())
+			xnclock_remote_shot(clock, sched);
+		else
+			xnclock_program_shot(clock, sched);
+	}
+}
+EXPORT_SYMBOL_GPL(xnclock_apply_offset);
+
+void xnclock_set_wallclock(xnticks_t epoch_ns)
+{
+	xnsticks_t old_offset_ns, offset_ns;
+	spl_t s;
+
+	/*
+	 * The epoch of CLOCK_REALTIME just changed. Since timeouts
+	 * are expressed as monotonic ticks, we need to apply the
+	 * wallclock-to-monotonic offset to all outstanding timers
+	 * based on this clock.
+	 */
+	xnlock_get_irqsave(&nklock, s);
+	old_offset_ns = nkclock.wallclock_offset;
+	offset_ns = (xnsticks_t)(epoch_ns - xnclock_core_read_monotonic());
+	nkclock.wallclock_offset = offset_ns;
+	nkvdso->wallclock_offset = offset_ns;
+	xnclock_apply_offset(&nkclock, offset_ns - old_offset_ns);
+	xnlock_put_irqrestore(&nklock, s);
+}
+EXPORT_SYMBOL_GPL(xnclock_set_wallclock);
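+
+/*
+ * Worked example (illustrative): if the core monotonic clock reads
+ * 1000000000 ns when the CLOCK_REALTIME epoch is set to
+ * 1647860000000000000 ns, the wallclock offset becomes
+ * 1647859999000000000 ns, and all outstanding CLOCK_REALTIME-based
+ * timers are shifted by the change in that offset via
+ * xnclock_apply_offset().
+ */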
+
+xnticks_t xnclock_core_read_monotonic(void)
+{
+	return xnclock_core_ticks_to_ns(xnclock_core_read_raw());
+}
+EXPORT_SYMBOL_GPL(xnclock_core_read_monotonic);
+
+#ifdef CONFIG_XENO_OPT_STATS
+
+static struct xnvfile_directory timerlist_vfroot;
+
+static struct xnvfile_snapshot_ops timerlist_ops;
+
+struct vfile_clock_priv {
+	struct xntimer *curr;
+};
+
+struct vfile_clock_data {
+	int cpu;
+	unsigned int scheduled;
+	unsigned int fired;
+	xnticks_t timeout;
+	xnticks_t interval;
+	unsigned long status;
+	char name[XNOBJECT_NAME_LEN];
+};
+
+static int timerlist_rewind(struct xnvfile_snapshot_iterator *it)
+{
+	struct vfile_clock_priv *priv = xnvfile_iterator_priv(it);
+	struct xnclock *clock = xnvfile_priv(it->vfile);
+
+	if (list_empty(&clock->timerq))
+		return -ESRCH;
+
+	priv->curr = list_first_entry(&clock->timerq, struct xntimer, next_stat);
+
+	return clock->nrtimers;
+}
+
+static int timerlist_next(struct xnvfile_snapshot_iterator *it, void *data)
+{
+	struct vfile_clock_priv *priv = xnvfile_iterator_priv(it);
+	struct xnclock *clock = xnvfile_priv(it->vfile);
+	struct vfile_clock_data *p = data;
+	struct xntimer *timer;
+
+	if (priv->curr == NULL)
+		return 0;
+
+	timer = priv->curr;
+	if (list_is_last(&timer->next_stat, &clock->timerq))
+		priv->curr = NULL;
+	else
+		priv->curr = list_entry(timer->next_stat.next,
+					struct xntimer, next_stat);
+
+	if (clock == &nkclock && xnstat_counter_get(&timer->scheduled) == 0)
+		return VFILE_SEQ_SKIP;
+
+	p->cpu = xnsched_cpu(xntimer_sched(timer));
+	p->scheduled = xnstat_counter_get(&timer->scheduled);
+	p->fired = xnstat_counter_get(&timer->fired);
+	p->timeout = xntimer_get_timeout(timer);
+	p->interval = xntimer_interval(timer);
+	p->status = timer->status;
+	knamecpy(p->name, timer->name);
+
+	return 1;
+}
+
+static int timerlist_show(struct xnvfile_snapshot_iterator *it, void *data)
+{
+	struct vfile_clock_data *p = data;
+	char timeout_buf[]  = "-         ";
+	char interval_buf[] = "-         ";
+	char hit_buf[32];
+
+	if (p == NULL)
+		xnvfile_printf(it,
+			       "%-3s  %-20s  %-10s  %-10s  %s\n",
+			       "CPU", "SCHED/SHOT", "TIMEOUT",
+			       "INTERVAL", "NAME");
+	else {
+		if (p->status & XNTIMER_RUNNING)
+			xntimer_format_time(p->timeout, timeout_buf,
+					    sizeof(timeout_buf));
+		if (p->status & XNTIMER_PERIODIC)
+			xntimer_format_time(p->interval, interval_buf,
+					    sizeof(interval_buf));
+		ksformat(hit_buf, sizeof(hit_buf), "%u/%u",
+			 p->scheduled, p->fired);
+		xnvfile_printf(it,
+			       "%-3u  %-20s  %-10s  %-10s  %s\n",
+			       p->cpu, hit_buf, timeout_buf,
+			       interval_buf, p->name);
+	}
+
+	return 0;
+}
+
+static struct xnvfile_snapshot_ops timerlist_ops = {
+	.rewind = timerlist_rewind,
+	.next = timerlist_next,
+	.show = timerlist_show,
+};
+
+static void init_timerlist_proc(struct xnclock *clock)
+{
+	memset(&clock->timer_vfile, 0, sizeof(clock->timer_vfile));
+	clock->timer_vfile.privsz = sizeof(struct vfile_clock_priv);
+	clock->timer_vfile.datasz = sizeof(struct vfile_clock_data);
+	clock->timer_vfile.tag = &clock->timer_revtag;
+	clock->timer_vfile.ops = &timerlist_ops;
+
+	xnvfile_init_snapshot(clock->name, &clock->timer_vfile, &timerlist_vfroot);
+	xnvfile_priv(&clock->timer_vfile) = clock;
+}
+
+static void cleanup_timerlist_proc(struct xnclock *clock)
+{
+	xnvfile_destroy_snapshot(&clock->timer_vfile);
+}
+
+void init_timerlist_root(void)
+{
+	xnvfile_init_dir("timer", &timerlist_vfroot, &cobalt_vfroot);
+}
+
+void cleanup_timerlist_root(void)
+{
+	xnvfile_destroy_dir(&timerlist_vfroot);
+}
+
+#else  /* !CONFIG_XENO_OPT_STATS */
+
+static inline void init_timerlist_root(void) { }
+
+static inline void cleanup_timerlist_root(void) { }
+
+static inline void init_timerlist_proc(struct xnclock *clock) { }
+
+static inline void cleanup_timerlist_proc(struct xnclock *clock) { }
+
+#endif	/* !CONFIG_XENO_OPT_STATS */
+
+#ifdef CONFIG_XENO_OPT_VFILE
+
+static struct xnvfile_directory clock_vfroot;
+
+void print_core_clock_status(struct xnclock *clock,
+			     struct xnvfile_regular_iterator *it)
+{
+	const char *wd_status = "off";
+
+#ifdef CONFIG_XENO_OPT_WATCHDOG
+	wd_status = "on";
+#endif /* CONFIG_XENO_OPT_WATCHDOG */
+
+	xnvfile_printf(it, "%8s: timer=%s, clock=%s\n",
+		       "devices", pipeline_timer_name(), pipeline_clock_name());
+	xnvfile_printf(it, "%8s: %s\n", "watchdog", wd_status);
+}
+
+static int clock_show(struct xnvfile_regular_iterator *it, void *data)
+{
+	struct xnclock *clock = xnvfile_priv(it->vfile);
+	xnticks_t now = xnclock_read_raw(clock);
+
+	if (clock->id >= 0)	/* External clock, print id. */
+		xnvfile_printf(it, "%7s: %d\n", "id", __COBALT_CLOCK_EXT(clock->id));
+
+	xnvfile_printf(it, "%7s: irq=%Ld kernel=%Ld user=%Ld\n", "gravity",
+		       xnclock_ticks_to_ns(clock, xnclock_get_gravity(clock, irq)),
+		       xnclock_ticks_to_ns(clock, xnclock_get_gravity(clock, kernel)),
+		       xnclock_ticks_to_ns(clock, xnclock_get_gravity(clock, user)));
+
+	xnclock_print_status(clock, it);
+
+	xnvfile_printf(it, "%7s: %Lu (%.4Lx %.4x)\n", "ticks",
+		       now, now >> 32, (u32)(now & -1U));
+
+	return 0;
+}
+
+static ssize_t clock_store(struct xnvfile_input *input)
+{
+	char buf[128], *args = buf, *p;
+	struct xnclock_gravity gravity;
+	struct xnvfile_regular *vfile;
+	unsigned long ns, ticks;
+	struct xnclock *clock;
+	ssize_t nbytes;
+	int ret;
+
+	nbytes = xnvfile_get_string(input, buf, sizeof(buf));
+	if (nbytes < 0)
+		return nbytes;
+
+	vfile = container_of(input->vfile, struct xnvfile_regular, entry);
+	clock = xnvfile_priv(vfile);
+	gravity = clock->gravity;
+
+	while ((p = strsep(&args, " \t:/,")) != NULL) {
+		if (*p == '\0')
+			continue;
+		ns = simple_strtol(p, &p, 10);
+		ticks = xnclock_ns_to_ticks(clock, ns);
+		switch (*p) {
+		case 'i':
+			gravity.irq = ticks;
+			break;
+		case 'k':
+			gravity.kernel = ticks;
+			break;
+		case 'u':
+		case '\0':
+			gravity.user = ticks;
+			break;
+		default:
+			return -EINVAL;
+		}
+		ret = xnclock_set_gravity(clock, &gravity);
+		if (ret)
+			return ret;
+	}
+
+	return nbytes;
+}
+
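+/*
+ * Usage sketch (illustrative): the gravity values parsed above are
+ * nanosecond counts suffixed by 'i' (irq), 'k' (kernel) or 'u' (user,
+ * also the default). Assuming the usual /proc layout rooted at
+ * /proc/xenomai, one could write for instance:
+ *
+ *	# echo "2000i 5000k 8000u" > /proc/xenomai/clock/coreclk
+ */
+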
+static struct xnvfile_regular_ops clock_ops = {
+	.show = clock_show,
+	.store = clock_store,
+};
+
+static void init_clock_proc(struct xnclock *clock)
+{
+	memset(&clock->vfile, 0, sizeof(clock->vfile));
+	clock->vfile.ops = &clock_ops;
+	xnvfile_init_regular(clock->name, &clock->vfile, &clock_vfroot);
+	xnvfile_priv(&clock->vfile) = clock;
+	init_timerlist_proc(clock);
+}
+
+static void cleanup_clock_proc(struct xnclock *clock)
+{
+	cleanup_timerlist_proc(clock);
+	xnvfile_destroy_regular(&clock->vfile);
+}
+
+void xnclock_init_proc(void)
+{
+	xnvfile_init_dir("clock", &clock_vfroot, &cobalt_vfroot);
+	init_timerlist_root();
+}
+
+void xnclock_cleanup_proc(void)
+{
+	xnvfile_destroy_dir(&clock_vfroot);
+	cleanup_timerlist_root();
+}
+
+#else /* !CONFIG_XENO_OPT_VFILE */
+
+static inline void init_clock_proc(struct xnclock *clock) { }
+
+static inline void cleanup_clock_proc(struct xnclock *clock) { }
+
+#endif	/* !CONFIG_XENO_OPT_VFILE */
+
+/**
+ * @brief Register a Xenomai clock.
+ *
+ * This service installs a new clock which may be used to drive
+ * Xenomai timers.
+ *
+ * @param clock The new clock to register.
+ *
+ * @param affinity The set of CPUs we may expect the backing clock
+ * device to tick on. As a special case, passing a NULL affinity mask
+ * means that timer IRQs cannot be seen as percpu events, in which
+ * case all outstanding timers will be maintained in a single global
+ * queue instead of percpu timer queues.
+ *
+ * @coretags{secondary-only}
+ */
+int xnclock_register(struct xnclock *clock, const cpumask_t *affinity)
+{
+	struct xntimerdata *tmd;
+	int cpu;
+
+	secondary_mode_only();
+
+#ifdef CONFIG_SMP
+	/*
+	 * A CPU affinity set may be defined for each clock,
+	 * enumerating the CPUs which can receive ticks from the
+	 * backing clock device.  When given, this set must be a
+	 * subset of the real-time CPU set.
+	 */
+	if (affinity) {
+		cpumask_and(&clock->affinity, affinity, &xnsched_realtime_cpus);
+		if (cpumask_empty(&clock->affinity))
+			return -EINVAL;
+	} else	/* Device is global without particular IRQ affinity. */
+		cpumask_clear(&clock->affinity);
+#endif
+
+	/* Allocate the percpu timer queue slot. */
+	clock->timerdata = alloc_percpu(struct xntimerdata);
+	if (clock->timerdata == NULL)
+		return -ENOMEM;
+
+	/*
+	 * POLA: init all timer slots for the new clock, although some
+	 * of them might remain unused depending on the CPU affinity
+	 * of the event source(s). If the clock device is global
+	 * without any particular IRQ affinity, all timers will be
+	 * queued to CPU0.
+	 */
+	for_each_online_cpu(cpu) {
+		tmd = xnclock_percpu_timerdata(clock, cpu);
+		xntimerq_init(&tmd->q);
+	}
+
+#ifdef CONFIG_XENO_OPT_STATS
+	INIT_LIST_HEAD(&clock->timerq);
+#endif /* CONFIG_XENO_OPT_STATS */
+
+	init_clock_proc(clock);
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(xnclock_register);
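+
+/*
+ * Usage sketch (illustrative): registering an external clock backed by
+ * a device with no particular IRQ affinity; "ext_clock" is a
+ * hypothetical instance, not part of this file:
+ *
+ *	static struct xnclock ext_clock = {
+ *		.name = "extclk",
+ *		.resolution = 1000,
+ *	};
+ *
+ *	ret = xnclock_register(&ext_clock, NULL);
+ */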
+
+/**
+ * @fn void xnclock_deregister(struct xnclock *clock)
+ * @brief Deregister a Xenomai clock.
+ *
+ * This service uninstalls a Xenomai clock previously registered with
+ * xnclock_register().
+ *
+ * This service should only be called once all timers driven by @a
+ * clock have been stopped.
+ *
+ * @param clock The clock to deregister.
+ *
+ * @coretags{secondary-only}
+ */
+void xnclock_deregister(struct xnclock *clock)
+{
+	struct xntimerdata *tmd;
+	int cpu;
+
+	secondary_mode_only();
+
+	cleanup_clock_proc(clock);
+
+	for_each_online_cpu(cpu) {
+		tmd = xnclock_percpu_timerdata(clock, cpu);
+		XENO_BUG_ON(COBALT, !xntimerq_empty(&tmd->q));
+		xntimerq_destroy(&tmd->q);
+	}
+
+	free_percpu(clock->timerdata);
+}
+EXPORT_SYMBOL_GPL(xnclock_deregister);
+
+/**
+ * @fn void xnclock_tick(struct xnclock *clock)
+ * @brief Process a clock tick.
+ *
+ * This routine processes an incoming @a clock event, firing elapsed
+ * timers as appropriate.
+ *
+ * @param clock The clock for which a new event was received.
+ *
+ * @coretags{coreirq-only, atomic-entry}
+ *
+ * @note The current CPU must be part of the real-time affinity set
+ * unless the clock device has no particular IRQ affinity, otherwise
+ * weird things may happen.
+ */
+void xnclock_tick(struct xnclock *clock)
+{
+	struct xnsched *sched = xnsched_current();
+	struct xntimer *timer;
+	xnsticks_t delta;
+	xntimerq_t *tmq;
+	xnticks_t now;
+	xntimerh_t *h;
+
+	atomic_only();
+
+#ifdef CONFIG_SMP
+	/*
+	 * Some external clock devices may be global without any
+	 * particular IRQ affinity, in which case the associated
+	 * timers will be queued to CPU0.
+	 */
+	if (IS_ENABLED(CONFIG_XENO_OPT_EXTCLOCK) &&
+	    clock != &nkclock &&
+	    !cpumask_test_cpu(xnsched_cpu(sched), &clock->affinity))
+		tmq = &xnclock_percpu_timerdata(clock, 0)->q;
+	else
+#endif
+		tmq = &xnclock_this_timerdata(clock)->q;
+
+	/*
+	 * Optimisation: any local timer reprogramming triggered by
+	 * invoked timer handlers can wait until we leave the tick
+	 * handler. Use this status flag as hint to xntimer_start().
+	 */
+	sched->status |= XNINTCK;
+
+	now = xnclock_read_raw(clock);
+	while ((h = xntimerq_head(tmq)) != NULL) {
+		timer = container_of(h, struct xntimer, aplink);
+		delta = (xnsticks_t)(xntimerh_date(&timer->aplink) - now);
+		if (delta > 0)
+			break;
+
+		trace_cobalt_timer_expire(timer);
+
+		xntimer_dequeue(timer, tmq);
+		xntimer_account_fired(timer);
+
+		/*
+		 * By postponing the propagation of the low-priority
+		 * host tick to the interrupt epilogue (see
+		 * xnintr_irq_handler()), we save some I-cache, which
+		 * translates into precious microsecs on low-end hw.
+		 */
+		if (unlikely(timer == &sched->htimer)) {
+			sched->lflags |= XNHTICK;
+			sched->lflags &= ~XNHDEFER;
+			if (timer->status & XNTIMER_PERIODIC)
+				goto advance;
+			continue;
+		}
+
+		timer->handler(timer);
+		now = xnclock_read_raw(clock);
+		timer->status |= XNTIMER_FIRED;
+		/*
+		 * Only requeue periodic timers which have not been
+		 * requeued, stopped or killed.
+		 */
+		if ((timer->status &
+		     (XNTIMER_PERIODIC|XNTIMER_DEQUEUED|XNTIMER_KILLED|XNTIMER_RUNNING)) !=
+		    (XNTIMER_PERIODIC|XNTIMER_DEQUEUED|XNTIMER_RUNNING))
+			continue;
+	advance:
+		do {
+			timer->periodic_ticks++;
+			xntimer_update_date(timer);
+		} while (xntimerh_date(&timer->aplink) < now);
+
+#ifdef CONFIG_SMP
+		/*
+		 * If the timer was migrated over its timeout handler,
+		 * xntimer_migrate() re-queued it already.
+		 */
+		if (unlikely(timer->sched != sched))
+			continue;
+#endif
+		xntimer_enqueue(timer, tmq);
+	}
+
+	sched->status &= ~XNINTCK;
+
+	xnclock_program_shot(clock, sched);
+}
+EXPORT_SYMBOL_GPL(xnclock_tick);
+
+static int set_core_clock_gravity(struct xnclock *clock,
+				  const struct xnclock_gravity *p)
+{
+	nkclock.gravity = *p;
+
+	return 0;
+}
+
+static void reset_core_clock_gravity(struct xnclock *clock)
+{
+	struct xnclock_gravity gravity;
+
+	xnarch_get_latencies(&gravity);
+	if (gravity.kernel == 0)
+		gravity.kernel = gravity.user;
+	set_core_clock_gravity(clock, &gravity);
+}
+
+struct xnclock nkclock = {
+	.name = "coreclk",
+	.resolution = 1,	/* nanosecond. */
+	.ops = {
+		.set_gravity = set_core_clock_gravity,
+		.reset_gravity = reset_core_clock_gravity,
+#ifdef CONFIG_XENO_OPT_VFILE
+		.print_status = print_core_clock_status,
+#endif
+	},
+	.id = -1,
+};
+EXPORT_SYMBOL_GPL(nkclock);
+
+void xnclock_cleanup(void)
+{
+	xnclock_deregister(&nkclock);
+}
+
+int __init xnclock_init(void)
+{
+	spl_t s;
+
+#ifdef XNARCH_HAVE_NODIV_LLIMD
+	xnarch_init_u32frac(&bln_frac, 1, 1000000000);
+#endif
+	pipeline_init_clock();
+	xnclock_reset_gravity(&nkclock);
+	xnlock_get_irqsave(&nklock, s);
+	nkclock.wallclock_offset = pipeline_read_wallclock() -
+		xnclock_core_read_monotonic();
+	xnlock_put_irqrestore(&nklock, s);
+	xnclock_register(&nkclock, &xnsched_realtime_cpus);
+
+	return 0;
+}
+
+/** @} */
+++ linux-patched/kernel/xenomai/registry.c	2022-03-21 12:58:28.776894939 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/kernel/xenomai/lock.c	1970-01-01 01:00:00.000000000 +0100
+/*
+ * Copyright (C) 2004 Philippe Gerum <rpm@xenomai.org>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+#include <linux/slab.h>
+#include <cobalt/kernel/sched.h>
+#include <cobalt/kernel/heap.h>
+#include <cobalt/kernel/registry.h>
+#include <cobalt/kernel/thread.h>
+#include <cobalt/kernel/assert.h>
+#include <pipeline/sirq.h>
+#include <trace/events/cobalt-core.h>
+
+/**
+ * @ingroup cobalt_core
+ * @defgroup cobalt_core_registry Registry services
+ *
+ * The registry provides a means to index object descriptors on unique
+ * alphanumeric keys. When labeled this way, an object is globally
+ * exported; it can be searched for, and its descriptor returned to
+ * the caller for further use; the latter operation is called a
+ * "binding". When no object has been registered under the given name
+ * yet, the registry can be asked to set up a rendez-vous, blocking
+ * the caller until the object is eventually registered.
+ *
+ *@{
+ */
+
+struct xnobject *registry_obj_slots;
+EXPORT_SYMBOL_GPL(registry_obj_slots);
+
+static LIST_HEAD(free_object_list); /* Free objects. */
+
+static LIST_HEAD(busy_object_list); /* Active and exported objects. */
+
+static unsigned int nr_active_objects;
+
+static unsigned long next_object_stamp;
+
+static struct hlist_head *object_index;
+
+static int nr_object_entries;
+
+static struct xnsynch register_synch;
+
+#ifdef CONFIG_XENO_OPT_VFILE
+
+#include <linux/workqueue.h>
+
+static void proc_callback(struct work_struct *work);
+
+static irqreturn_t registry_proc_schedule(int virq, void *dev_id);
+
+static LIST_HEAD(proc_object_list);	/* Objects waiting for /proc handling. */
+
+static DECLARE_WORK(registry_proc_work, proc_callback);
+
+static int proc_virq;
+
+static struct xnvfile_directory registry_vfroot;
+
+static int usage_vfile_show(struct xnvfile_regular_iterator *it, void *data)
+{
+	xnvfile_printf(it, "%u/%u\n",
+		       nr_active_objects,
+		       CONFIG_XENO_OPT_REGISTRY_NRSLOTS);
+	return 0;
+}
+
+static struct xnvfile_regular_ops usage_vfile_ops = {
+	.show = usage_vfile_show,
+};
+
+static struct xnvfile_regular usage_vfile = {
+	.ops = &usage_vfile_ops,
+};
+
+#endif /* CONFIG_XENO_OPT_VFILE */
+
+unsigned xnregistry_hash_size(void)
+{
+	static const int primes[] = {
+		101, 211, 307, 401, 503, 601,
+		701, 809, 907, 1009, 1103
+	};
+
+#define obj_hash_max(n)			 \
+((n) < sizeof(primes) / sizeof(int) ? \
+ (n) : sizeof(primes) / sizeof(int) - 1)
+
+	return primes[obj_hash_max(CONFIG_XENO_OPT_REGISTRY_NRSLOTS / 100)];
+}
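+
+/*
+ * Worked example (illustrative): with CONFIG_XENO_OPT_REGISTRY_NRSLOTS
+ * set to 512, 512 / 100 = 5 so primes[5] = 601 hash buckets are used;
+ * ratios beyond the table are clamped to the last entry (1103).
+ */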
+
+int xnregistry_init(void)
+{
+	int n, ret __maybe_unused;
+
+	registry_obj_slots = kmalloc(CONFIG_XENO_OPT_REGISTRY_NRSLOTS *
+				     sizeof(struct xnobject), GFP_KERNEL);
+	if (registry_obj_slots == NULL)
+		return -ENOMEM;
+
+#ifdef CONFIG_XENO_OPT_VFILE
+	ret = xnvfile_init_dir("registry", &registry_vfroot, &cobalt_vfroot);
+	if (ret)
+		return ret;
+
+	ret = xnvfile_init_regular("usage", &usage_vfile, &registry_vfroot);
+	if (ret) {
+		xnvfile_destroy_dir(&registry_vfroot);
+		return ret;
+	}
+
+	proc_virq = pipeline_create_inband_sirq(registry_proc_schedule);
+	if (proc_virq < 0) {
+		xnvfile_destroy_regular(&usage_vfile);
+		xnvfile_destroy_dir(&registry_vfroot);
+		return proc_virq;
+	}
+#endif /* CONFIG_XENO_OPT_VFILE */
+
+	next_object_stamp = 0;
+
+	for (n = 0; n < CONFIG_XENO_OPT_REGISTRY_NRSLOTS; n++) {
+		registry_obj_slots[n].objaddr = NULL;
+		list_add_tail(&registry_obj_slots[n].link, &free_object_list);
+	}
+
+	/* Slot #0 is reserved/invalid. */
+	list_get_entry(&free_object_list, struct xnobject, link);
+	nr_active_objects = 1;
+
+	nr_object_entries = xnregistry_hash_size();
+	object_index = kmalloc(sizeof(*object_index) *
+				      nr_object_entries, GFP_KERNEL);
+
+	if (object_index == NULL) {
+#ifdef CONFIG_XENO_OPT_VFILE
+		xnvfile_destroy_regular(&usage_vfile);
+		xnvfile_destroy_dir(&registry_vfroot);
+		pipeline_delete_inband_sirq(proc_virq);
+#endif /* CONFIG_XENO_OPT_VFILE */
+		return -ENOMEM;
+	}
+
+	for (n = 0; n < nr_object_entries; n++)
+		INIT_HLIST_HEAD(&object_index[n]);
+
+	xnsynch_init(&register_synch, XNSYNCH_FIFO, NULL);
+
+	return 0;
+}
+
+void xnregistry_cleanup(void)
+{
+#ifdef CONFIG_XENO_OPT_VFILE
+	struct hlist_node *enext;
+	struct xnobject *ecurr;
+	struct xnpnode *pnode;
+	int n;
+
+	flush_scheduled_work();
+
+	for (n = 0; n < nr_object_entries; n++)
+		hlist_for_each_entry_safe(ecurr, enext, 
+					&object_index[n], hlink) {
+			pnode = ecurr->pnode;
+			if (pnode == NULL)
+				continue;
+
+			pnode->ops->unexport(ecurr, pnode);
+
+			if (--pnode->entries > 0)
+				continue;
+
+			xnvfile_destroy_dir(&pnode->vdir);
+
+			if (--pnode->root->entries == 0)
+				xnvfile_destroy_dir(&pnode->root->vdir);
+		}
+#endif /* CONFIG_XENO_OPT_VFILE */
+
+	kfree(object_index);
+	xnsynch_destroy(&register_synch);
+
+#ifdef CONFIG_XENO_OPT_VFILE
+	pipeline_delete_inband_sirq(proc_virq);
+	flush_scheduled_work();
+	xnvfile_destroy_regular(&usage_vfile);
+	xnvfile_destroy_dir(&registry_vfroot);
+#endif /* CONFIG_XENO_OPT_VFILE */
+
+	kfree(registry_obj_slots);
+}
+
+#ifdef CONFIG_XENO_OPT_VFILE
+
+static DEFINE_SEMAPHORE(export_mutex);
+
+/*
+ * The following code implements the mechanism for delegating
+ * export/unexport requests to/from the /proc interface from the
+ * Xenomai domain to the Linux kernel (i.e. the "lower stage"). This
+ * ends up being a bit complex because such requests may lag long
+ * enough before the Linux kernel processes them that later requests
+ * contradict earlier ones before the latter have even been applied
+ * (e.g. export -> unexport in the Xenomai domain for short-lived
+ * objects). Such situations are handled by carefully synchronizing
+ * operations across domains.
+ */
+static void proc_callback(struct work_struct *work)
+{
+	struct xnvfile_directory *rdir, *dir;
+	const char *rname, *type;
+	struct xnobject *object;
+	struct xnpnode *pnode;
+	int ret;
+	spl_t s;
+
+	down(&export_mutex);
+
+	xnlock_get_irqsave(&nklock, s);
+
+	while (!list_empty(&proc_object_list)) {
+		object = list_get_entry(&proc_object_list,
+					struct xnobject, link);
+		pnode = object->pnode;
+		type = pnode->dirname;
+		dir = &pnode->vdir;
+		rdir = &pnode->root->vdir;
+		rname = pnode->root->dirname;
+
+		if (object->vfilp != XNOBJECT_EXPORT_SCHEDULED)
+			goto unexport;
+
+		object->vfilp = XNOBJECT_EXPORT_INPROGRESS;
+		list_add_tail(&object->link, &busy_object_list);
+
+		xnlock_put_irqrestore(&nklock, s);
+
+		if (pnode->entries++ == 0) {
+			if (pnode->root->entries++ == 0) {
+				/* Create the root directory on the fly. */
+				ret = xnvfile_init_dir(rname, rdir, &registry_vfroot);
+				if (ret) {
+					xnlock_get_irqsave(&nklock, s);
+					object->pnode = NULL;
+					pnode->root->entries = 0;
+					pnode->entries = 0;
+					continue;
+				}
+			}
+			/* Create the class directory on the fly. */
+			ret = xnvfile_init_dir(type, dir, rdir);
+			if (ret) {
+				if (pnode->root->entries == 1) {
+					pnode->root->entries = 0;
+					xnvfile_destroy_dir(rdir);
+				}
+				xnlock_get_irqsave(&nklock, s);
+				object->pnode = NULL;
+				pnode->entries = 0;
+				continue;
+			}
+		}
+
+		ret = pnode->ops->export(object, pnode);
+		if (ret && --pnode->entries == 0) {
+			xnvfile_destroy_dir(dir);
+			if (--pnode->root->entries == 0)
+				xnvfile_destroy_dir(rdir);
+			xnlock_get_irqsave(&nklock, s);
+			object->pnode = NULL;
+		} else
+			xnlock_get_irqsave(&nklock, s);
+
+		continue;
+
+	unexport:
+		if (object->vfilp == XNOBJECT_EXPORT_ABORTED)
+			object->objaddr = NULL;
+
+		object->vfilp = NULL;
+		object->pnode = NULL;
+
+		if (object->objaddr)
+			list_add_tail(&object->link, &busy_object_list);
+		else {
+			/*
+			 * Trap the case where we are unexporting an
+			 * already unregistered object.
+			 */
+			list_add_tail(&object->link, &free_object_list);
+			nr_active_objects--;
+		}
+
+		xnlock_put_irqrestore(&nklock, s);
+
+		pnode->ops->unexport(object, pnode);
+
+		if (--pnode->entries == 0) {
+			xnvfile_destroy_dir(dir);
+			if (--pnode->root->entries == 0)
+				xnvfile_destroy_dir(rdir);
+		}
+
+		xnlock_get_irqsave(&nklock, s);
+	}
+
+	xnlock_put_irqrestore(&nklock, s);
+
+	up(&export_mutex);
+}
+
+static irqreturn_t registry_proc_schedule(int virq, void *dev_id)
+{
+	/*
+	 * schedule_work() will check for us if the work has already
+	 * been scheduled, so just be lazy and submit blindly.
+	 */
+	schedule_work(&registry_proc_work);
+
+	return IRQ_HANDLED;
+}
+
+static int registry_export_vfsnap(struct xnobject *object,
+				  struct xnpnode *pnode)
+{
+	struct xnpnode_snapshot *p;
+	int ret;
+
+	/*
+	 * Make sure to initialize _all_ mandatory vfile fields; most
+	 * of the time we are using sane NULL defaults based on static
+	 * storage for the vfile struct, but here we are building up a
+	 * vfile object explicitly.
+	 */
+	p = container_of(pnode, struct xnpnode_snapshot, node);
+	object->vfile_u.vfsnap.file.datasz = p->vfile.datasz;
+	object->vfile_u.vfsnap.file.privsz = p->vfile.privsz;
+	/*
+	 * Make the vfile refer to the provided tag struct if any,
+	 * otherwise use our default tag space. In the latter case,
+	 * each object family has its own private revision tag.
+	 */
+	object->vfile_u.vfsnap.file.tag = p->vfile.tag ?:
+		&object->vfile_u.vfsnap.tag;
+	object->vfile_u.vfsnap.file.ops = p->vfile.ops;
+	object->vfile_u.vfsnap.file.entry.lockops = p->vfile.lockops;
+
+	ret = xnvfile_init_snapshot(object->key, &object->vfile_u.vfsnap.file,
+				    &pnode->vdir);
+	if (ret)
+		return ret;
+
+	object->vfilp = &object->vfile_u.vfsnap.file.entry;
+	object->vfilp->private = object->objaddr;
+
+	return 0;
+}
+
+static void registry_unexport_vfsnap(struct xnobject *object,
+				    struct xnpnode *pnode)
+{
+	xnvfile_destroy_snapshot(&object->vfile_u.vfsnap.file);
+}
+
+static void registry_touch_vfsnap(struct xnobject *object)
+{
+	xnvfile_touch(&object->vfile_u.vfsnap.file);
+}
+
+struct xnpnode_ops xnregistry_vfsnap_ops = {
+	.export = registry_export_vfsnap,
+	.unexport = registry_unexport_vfsnap,
+	.touch = registry_touch_vfsnap,
+};
+EXPORT_SYMBOL_GPL(xnregistry_vfsnap_ops);
+
+static int registry_export_vfreg(struct xnobject *object,
+				 struct xnpnode *pnode)
+{
+	struct xnpnode_regular *p;
+	int ret;
+
+	/* See registry_export_vfsnap() for hints. */
+	p = container_of(pnode, struct xnpnode_regular, node);
+	object->vfile_u.vfreg.privsz = p->vfile.privsz;
+	object->vfile_u.vfreg.ops = p->vfile.ops;
+	object->vfile_u.vfreg.entry.lockops = p->vfile.lockops;
+	object->vfile_u.vfreg.entry.refcnt = 0;
+
+	ret = xnvfile_init_regular(object->key, &object->vfile_u.vfreg,
+				   &pnode->vdir);
+	if (ret)
+		return ret;
+
+	object->vfilp = &object->vfile_u.vfreg.entry;
+	object->vfilp->private = object->objaddr;
+
+	return 0;
+}
+
+static void registry_unexport_vfreg(struct xnobject *object,
+				    struct xnpnode *pnode)
+{
+	xnvfile_destroy_regular(&object->vfile_u.vfreg);
+}
+
+struct xnpnode_ops xnregistry_vfreg_ops = {
+	.export = registry_export_vfreg,
+	.unexport = registry_unexport_vfreg,
+};
+EXPORT_SYMBOL_GPL(xnregistry_vfreg_ops);
+
+static int registry_export_vlink(struct xnobject *object,
+				 struct xnpnode *pnode)
+{
+	struct xnpnode_link *link_desc;
+	char *link_target;
+	int ret;
+
+	link_desc = container_of(pnode, struct xnpnode_link, node);
+	link_target = link_desc->target(object->objaddr);
+	if (link_target == NULL)
+		return -ENOMEM;
+
+	ret = xnvfile_init_link(object->key, link_target,
+				&object->vfile_u.link, &pnode->vdir);
+	kfree(link_target);
+	if (ret)
+		return ret;
+
+	object->vfilp = &object->vfile_u.link.entry;
+	object->vfilp->private = object->objaddr;
+
+	return 0;
+}
+
+static void registry_unexport_vlink(struct xnobject *object,
+				    struct xnpnode *pnode)
+{
+	xnvfile_destroy_link(&object->vfile_u.link);
+}
+
+struct xnpnode_ops xnregistry_vlink_ops = {
+	.export = registry_export_vlink,
+	.unexport = registry_unexport_vlink,
+};
+EXPORT_SYMBOL_GPL(xnregistry_vlink_ops);
+
+static inline void registry_export_pnode(struct xnobject *object,
+					 struct xnpnode *pnode)
+{
+	object->vfilp = XNOBJECT_EXPORT_SCHEDULED;
+	object->pnode = pnode;
+	list_del(&object->link);
+	list_add_tail(&object->link, &proc_object_list);
+	pipeline_post_sirq(proc_virq);
+}
+
+static inline void registry_unexport_pnode(struct xnobject *object)
+{
+	if (object->vfilp != XNOBJECT_EXPORT_SCHEDULED) {
+		/*
+		 * We might have preempted a v-file read op, so bump
+		 * the object's revtag to make sure the data
+		 * collection is aborted next, if we end up deleting
+		 * the object being read.
+		 */
+		if (object->pnode->ops->touch)
+			object->pnode->ops->touch(object);
+		list_del(&object->link);
+		list_add_tail(&object->link, &proc_object_list);
+		pipeline_post_sirq(proc_virq);
+	} else {
+		/*
+		 * Unexporting before the lower stage has had a chance
+		 * to export. Move back the object to the busyq just
+		 * like if no export had been requested.
+		 */
+		list_del(&object->link);
+		list_add_tail(&object->link, &busy_object_list);
+		object->pnode = NULL;
+		object->vfilp = NULL;
+	}
+}
+
+#endif /* CONFIG_XENO_OPT_VFILE */
+
+static unsigned registry_hash_crunch(const char *key)
+{
+	unsigned int h = 0, g;
+
+#define HQON    24		/* Higher byte position */
+#define HBYTE   0xf0000000	/* Higher nibble on */
+
+	while (*key) {
+		h = (h << 4) + *key++;
+		if ((g = (h & HBYTE)) != 0)
+			h = (h ^ (g >> HQON)) ^ g;
+	}
+
+	return h % nr_object_entries;
+}
+
+static inline int registry_hash_enter(const char *key, struct xnobject *object)
+{
+	struct xnobject *ecurr;
+	unsigned s;
+
+	object->key = key;
+	s = registry_hash_crunch(key);
+
+	hlist_for_each_entry(ecurr, &object_index[s], hlink)
+		if (ecurr == object || strcmp(key, ecurr->key) == 0)
+			return -EEXIST;
+
+	hlist_add_head(&object->hlink, &object_index[s]);
+
+	return 0;
+}
+
+static inline int registry_hash_remove(struct xnobject *object)
+{
+	unsigned int s = registry_hash_crunch(object->key);
+	struct xnobject *ecurr;
+
+	hlist_for_each_entry(ecurr, &object_index[s], hlink)
+		if (ecurr == object) {
+			hlist_del(&ecurr->hlink);
+			return 0;
+		}
+
+	return -ESRCH;
+}
+
+static struct xnobject *registry_hash_find(const char *key)
+{
+	struct xnobject *ecurr;
+
+	hlist_for_each_entry(ecurr, 
+			&object_index[registry_hash_crunch(key)], hlink)
+		if (strcmp(key, ecurr->key) == 0)
+			return ecurr;
+
+	return NULL;
+}
+
+struct registry_wait_context {
+	struct xnthread_wait_context wc;
+	const char *key;
+};
+
+static inline int registry_wakeup_sleepers(const char *key)
+{
+	struct registry_wait_context *rwc;
+	struct xnthread_wait_context *wc;
+	struct xnthread *sleeper, *tmp;
+	int cnt = 0;
+
+	xnsynch_for_each_sleeper_safe(sleeper, tmp, &register_synch) {
+		wc = xnthread_get_wait_context(sleeper);
+		rwc = container_of(wc, struct registry_wait_context, wc);
+		if (*key == *rwc->key && strcmp(key, rwc->key) == 0) {
+			xnsynch_wakeup_this_sleeper(&register_synch, sleeper);
+			++cnt;
+		}
+	}
+
+	return cnt;
+}
+
+/**
+ * @fn int xnregistry_enter(const char *key,void *objaddr,xnhandle_t *phandle,struct xnpnode *pnode)
+ * @brief Register a real-time object.
+ *
+ * This service allocates a new registry slot for an associated
+ * object, and indexes it by an alphanumeric key for later retrieval.
+ *
+ * @param key A valid NULL-terminated string by which the object will
+ * be indexed and later retrieved in the registry. Since it is assumed
+ * that such a key is stored in the registered object, it will *not*
+ * be copied but only kept by reference in the registry. Pass an empty
+ * or NULL string if the object shall only occupy a registry slot for
+ * handle-based lookups. The slash character is not accepted in @a key
+ * if @a pnode is non-NULL.
+ *
+ * @param objaddr An opaque pointer to the object to index by @a
+ * key.
+ *
+ * @param phandle A pointer to a generic handle defined by the
+ * registry which will uniquely identify the indexed object, until the
+ * latter is unregistered using the xnregistry_remove() service.
+ *
+ * @param pnode A pointer to an optional /proc node class
+ * descriptor. This structure provides the information needed to
+ * export all objects from the given class through the /proc
+ * filesystem, under the /proc/xenomai/registry entry. Passing NULL
+ * indicates that no /proc support is available for the newly
+ * registered object.
+ *
+ * @return 0 is returned upon success. Otherwise:
+ *
+ * - -EINVAL is returned if @a objaddr is NULL.
+ *
+ * - -EINVAL if @a pnode is non-NULL, and @a key points to a valid
+ * string containing a '/' character.
+ *
+ * - -EAGAIN is returned if no registry slot is available for the
+ * object (i.e. the registry is full).
+ *
+ * - -EEXIST is returned if the @a key is already in use.
+ *
+ * @coretags{unrestricted, might-switch, atomic-entry}
+ */
+int xnregistry_enter(const char *key, void *objaddr,
+		     xnhandle_t *phandle, struct xnpnode *pnode)
+{
+	struct xnobject *object;
+	spl_t s;
+	int ret;
+
+	if (objaddr == NULL ||
+	    (pnode != NULL && key != NULL && strchr(key, '/')))
+		return -EINVAL;
+
+	xnlock_get_irqsave(&nklock, s);
+
+	if (list_empty(&free_object_list)) {
+		ret = -EAGAIN;
+		goto unlock_and_exit;
+	}
+
+	object = list_get_entry(&free_object_list, struct xnobject, link);
+	nr_active_objects++;
+	object->objaddr = objaddr;
+	object->cstamp = ++next_object_stamp;
+	trace_cobalt_registry_enter(key, objaddr);
+#ifdef CONFIG_XENO_OPT_VFILE
+	object->pnode = NULL;
+#endif
+	if (key == NULL || *key == '\0') {
+		object->key = NULL;
+		*phandle = object - registry_obj_slots;
+		ret = 0;
+		goto unlock_and_exit;
+	}
+
+	ret = registry_hash_enter(key, object);
+	if (ret) {
+		nr_active_objects--;
+		list_add_tail(&object->link, &free_object_list);
+		goto unlock_and_exit;
+	}
+
+	list_add_tail(&object->link, &busy_object_list);
+
+	/*
+	 * <!> Make sure the handle is written back before the
+	 * rescheduling takes place.
+	 */
+	*phandle = object - registry_obj_slots;
+
+#ifdef CONFIG_XENO_OPT_VFILE
+	if (pnode)
+		registry_export_pnode(object, pnode);
+#endif /* CONFIG_XENO_OPT_VFILE */
+
+	if (registry_wakeup_sleepers(key))
+		xnsched_run();
+
+unlock_and_exit:
+
+	xnlock_put_irqrestore(&nklock, s);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(xnregistry_enter);
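+
+/*
+ * Usage sketch (illustrative): a typical caller stores the key inside
+ * the object it registers, then releases the slot with
+ * xnregistry_remove() when the object is deleted; "sem" and its
+ * fields are hypothetical:
+ *
+ *	ret = xnregistry_enter(sem->name, sem, &sem->handle, NULL);
+ *	if (ret)
+ *		return ret;
+ *	// ...
+ *	xnregistry_remove(sem->handle);
+ */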
+
+/**
+ * @fn int xnregistry_bind(const char *key,xnticks_t timeout,int timeout_mode,xnhandle_t *phandle)
+ * @brief Bind to a real-time object.
+ *
+ * This service retrieves the registry handle of a given object
+ * identified by its key. Unless otherwise specified, this service
+ * will block the caller if the object is not registered yet, waiting
+ * for such registration to occur.
+ *
+ * @param key A valid NULL-terminated string which identifies the
+ * object to bind to.
+ *
+ * @param timeout The timeout which may be used to limit the time the
+ * thread waits for the object to be registered. This value is a wait
+ * time given as a count of nanoseconds. It can either be relative,
+ * absolute monotonic (XN_ABSOLUTE), or absolute adjustable
+ * (XN_REALTIME) depending on @a timeout_mode. Passing XN_INFINITE @b
+ * and setting @a timeout_mode to XN_RELATIVE specifies an unbounded
+ * wait. Passing XN_NONBLOCK causes the service to return immediately
+ * without waiting if the object is not registered on entry. All other
+ * values are used as a wait limit.
+ *
+ * @param timeout_mode The mode of the @a timeout parameter. It can
+ * either be set to XN_RELATIVE, XN_ABSOLUTE, or XN_REALTIME (see also
+ * xntimer_start()).
+ *
+ * @param phandle A pointer to a memory location which will be written
+ * upon success with the generic handle defined by the registry for
+ * the retrieved object. Contents of this memory is undefined upon
+ * failure.
+ *
+ * @return 0 is returned upon success. Otherwise:
+ *
+ * - -EINVAL is returned if @a key is NULL.
+ *
+ * - -EINTR is returned if xnthread_unblock() has been called for the
+ * waiting thread before the retrieval has completed.
+ *
+ * - -EWOULDBLOCK is returned if @a timeout is equal to XN_NONBLOCK
+ * and the searched object is not registered on entry. As a special
+ * exception, this error is also returned if this service should
+ * block, but was called from a context which cannot sleep
+ * (e.g. interrupt, non-realtime or scheduler locked).
+ *
+ * - -ETIMEDOUT is returned if the object cannot be retrieved within
+ * the specified amount of time.
+ *
+ * @coretags{primary-only, might-switch}
+ *
+ * @note xnregistry_bind() only returns the index portion of a handle,
+ * which might include other fixed bits to be complete
+ * (e.g. XNSYNCH_PSHARED). The caller is responsible for completing
+ * the handle returned with those bits if applicable, depending on the
+ * context.
+ */
+int xnregistry_bind(const char *key, xnticks_t timeout, int timeout_mode,
+		    xnhandle_t *phandle)
+{
+	struct registry_wait_context rwc;
+	struct xnobject *object;
+	int ret = 0, info;
+	spl_t s;
+
+	if (key == NULL)
+		return -EINVAL;
+
+	xnlock_get_irqsave(&nklock, s);
+
+	if (timeout_mode == XN_RELATIVE &&
+	    timeout != XN_INFINITE && timeout != XN_NONBLOCK) {
+		timeout_mode = XN_ABSOLUTE;
+		timeout += xnclock_read_monotonic(&nkclock);
+	}
+
+	for (;;) {
+		object = registry_hash_find(key);
+		if (object) {
+			*phandle = object - registry_obj_slots;
+			goto unlock_and_exit;
+		}
+
+		if ((timeout_mode == XN_RELATIVE && timeout == XN_NONBLOCK) ||
+		    xnsched_unblockable_p()) {
+			ret = -EWOULDBLOCK;
+			goto unlock_and_exit;
+		}
+
+		rwc.key = key;
+		xnthread_prepare_wait(&rwc.wc);
+		info = xnsynch_sleep_on(&register_synch, timeout, timeout_mode);
+		if (info & XNTIMEO) {
+			ret = -ETIMEDOUT;
+			goto unlock_and_exit;
+		}
+		if (info & XNBREAK) {
+			ret = -EINTR;
+			goto unlock_and_exit;
+		}
+	}
+
+unlock_and_exit:
+
+	xnlock_put_irqrestore(&nklock, s);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(xnregistry_bind);
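+
+/*
+ * Usage sketch (illustrative): waiting up to one second for an object
+ * to show up under a given key, then resolving its descriptor; the
+ * key and object names are hypothetical:
+ *
+ *	xnhandle_t handle;
+ *	void *obj = NULL;
+ *	int ret;
+ *
+ *	ret = xnregistry_bind("my-sem", 1000000000ULL, XN_RELATIVE, &handle);
+ *	if (ret == 0)
+ *		obj = xnregistry_lookup(handle, NULL);
+ */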
+
+/**
+ * @fn int xnregistry_remove(xnhandle_t handle)
+ * @brief Forcibly unregister a real-time object.
+ *
+ * This service forcibly removes an object from the registry. The
+ * removal is performed regardless of the current object's locking
+ * status.
+ *
+ * @param handle The generic handle of the object to remove.
+ *
+ * @return 0 is returned upon success. Otherwise:
+ *
+ * - -ESRCH is returned if @a handle does not reference a registered
+ * object.
+ *
+ * @coretags{unrestricted}
+ */
+int xnregistry_remove(xnhandle_t handle)
+{
+	struct xnobject *object;
+	void *objaddr;
+	int ret = 0;
+	spl_t s;
+
+	xnlock_get_irqsave(&nklock, s);
+
+	object = xnregistry_validate(handle);
+	if (object == NULL) {
+		ret = -ESRCH;
+		goto unlock_and_exit;
+	}
+
+	trace_cobalt_registry_remove(object->key, object->objaddr);
+
+	objaddr = object->objaddr;
+	object->objaddr = NULL;
+	object->cstamp = 0;
+
+	if (object->key) {
+		registry_hash_remove(object);
+
+#ifdef CONFIG_XENO_OPT_VFILE
+		if (object->pnode) {
+			if (object->vfilp == XNOBJECT_EXPORT_INPROGRESS) {
+				object->vfilp = XNOBJECT_EXPORT_ABORTED;
+				object->objaddr = objaddr;
+			}
+
+			registry_unexport_pnode(object);
+			/*
+			 * Leave the update of the object queues to
+			 * the work callback if it has been kicked.
+			 */
+			if (object->pnode) {
+				xnlock_put_irqrestore(&nklock, s);
+				if (is_secondary_domain())
+					flush_work(&registry_proc_work);
+				return 0;
+			}
+		}
+#endif /* CONFIG_XENO_OPT_VFILE */
+
+		list_del(&object->link);
+	}
+
+	if (!IS_ENABLED(CONFIG_XENO_OPT_VFILE) || !object->objaddr) {
+		list_add_tail(&object->link, &free_object_list);
+		nr_active_objects--;
+	}
+
+unlock_and_exit:
+
+	xnlock_put_irqrestore(&nklock, s);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(xnregistry_remove);
+
+/**
+ * Turn a named object into an anonymous object
+ *
+ * @coretags{unrestricted}
+ */
+int xnregistry_unlink(const char *key)
+{
+	struct xnobject *object;
+	int ret = 0;
+	spl_t s;
+
+	if (key == NULL)
+		return -EINVAL;
+
+	xnlock_get_irqsave(&nklock, s);
+
+	object = registry_hash_find(key);
+	if (object == NULL) {
+		ret = -ESRCH;
+		goto unlock_and_exit;
+	}
+
+	trace_cobalt_registry_unlink(object->key, object->objaddr);
+
+	ret = registry_hash_remove(object);
+	if (ret < 0)
+		goto unlock_and_exit;
+
+#ifdef CONFIG_XENO_OPT_VFILE
+	if (object->pnode) {
+		registry_unexport_pnode(object);
+		/*
+		 * Leave the update of the object queues to
+		 * the work callback if it has been kicked.
+		 */
+		if (object->pnode)
+			goto unlock_and_exit;
+	}
+#endif /* CONFIG_XENO_OPT_VFILE */
+
+	list_del(&object->link);
+
+	object->key = NULL;
+
+unlock_and_exit:
+	xnlock_put_irqrestore(&nklock, s);
+
+	return ret;
+}
+
+/**
+ * @fn void *xnregistry_lookup(xnhandle_t handle, unsigned long *cstamp_r)
+ * @brief Find a real-time object into the registry.
+ *
+ * This service retrieves an object from its handle into the registry
+ * and returns the memory address of its descriptor. Optionally, it
+ * also copies back the object's creation stamp which is unique across
+ * object registration calls.
+ *
+ * @param handle The generic handle of the object to fetch.
+ *
+ * @param cstamp_r If not-NULL, the object's creation stamp will be
+ * copied to this memory area.
+ *
+ * @return The memory address of the object's descriptor is returned
+ * on success. Otherwise, NULL is returned if @a handle does not
+ * reference a registered object.
+ *
+ * @coretags{unrestricted}
+ */
+
+/** @} */
+++ linux-patched/kernel/xenomai/lock.c	2022-03-21 12:58:28.761895085 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/kernel/xenomai/pipe.c	1970-01-01 01:00:00.000000000 +0100
+/*
+ * Copyright (C) 2001-2012 Philippe Gerum <rpm@xenomai.org>.
+ * Copyright (C) 2004,2005 Gilles Chanteperdrix <gilles.chanteperdrix@xenomai.org>.
+ *
+ * Xenomai is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published
+ * by the Free Software Foundation; either version 2 of the License,
+ * or (at your option) any later version.
+ *
+ * Xenomai is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Xenomai; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA
+ * 02111-1307, USA.
+ */
+#include <linux/module.h>
+#include <cobalt/kernel/lock.h>
+
+/**
+ * @ingroup cobalt_core
+ * @defgroup cobalt_core_lock Locking services
+ *
+ * The Xenomai core deals with concurrent activities from two distinct
+ * kernels running side-by-side. When interrupts are involved, the
+ * services from this section control the @b hard interrupt state
+ * exclusively, for protecting against processor-local or SMP
+ * concurrency.
+ *
+ * @note In a dual kernel configuration, <i>hard interrupts</i> are
+ * gated by the CPU. When enabled, hard interrupts are immediately
+ * delivered to the Xenomai core if they belong to a real-time source,
+ * or deferred until enabled by a second-stage virtual interrupt mask,
+ * if they belong to regular Linux devices/sources.
+ *
+ * @{
+ */
+DEFINE_XNLOCK(nklock);
+#if defined(CONFIG_SMP) || defined(CONFIG_XENO_OPT_DEBUG_LOCKING)
+EXPORT_SYMBOL_GPL(nklock);
+
+#ifdef CONFIG_XENO_ARCH_OUTOFLINE_XNLOCK
+int ___xnlock_get(struct xnlock *lock /*, */ XNLOCK_DBG_CONTEXT_ARGS)
+{
+	return ____xnlock_get(lock /* , */ XNLOCK_DBG_PASS_CONTEXT);
+}
+EXPORT_SYMBOL_GPL(___xnlock_get);
+
+void ___xnlock_put(struct xnlock *lock /*, */ XNLOCK_DBG_CONTEXT_ARGS)
+{
+	____xnlock_put(lock /* , */ XNLOCK_DBG_PASS_CONTEXT);
+}
+EXPORT_SYMBOL_GPL(___xnlock_put);
+#endif /* out of line xnlock */
+#endif /* CONFIG_SMP || XENO_DEBUG(LOCKING) */
+
+#ifdef CONFIG_XENO_OPT_DEBUG_LOCKING
+DEFINE_PER_CPU(struct xnlockinfo, xnlock_stats);
+EXPORT_PER_CPU_SYMBOL_GPL(xnlock_stats);
+#endif
+
+/** @} */
+++ linux-patched/kernel/xenomai/pipe.c	2022-03-21 12:58:28.746895231 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/kernel/xenomai/debug.c	1970-01-01 01:00:00.000000000 +0100
+/*
+ * Copyright (C) 2001,2002,2003,2004 Philippe Gerum <rpm@xenomai.org>.
+ * Copyright (C) 2005 Dmitry Adamushko <dmitry.adamushko@gmail.com>
+ *
+ * Xenomai is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published
+ * by the Free Software Foundation, Inc., 675 Mass Ave, Cambridge MA
+ * 02139, USA; either version 2 of the License, or (at your option)
+ * any later version.
+ *
+ * Xenomai is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA
+ * 02111-1307, USA.
+ */
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/init.h>
+#include <linux/fcntl.h>
+#include <linux/poll.h>
+#include <linux/termios.h>
+#include <linux/spinlock.h>
+#include <linux/device.h>
+#include <linux/uaccess.h>
+#include <asm/io.h>
+#include <asm/xenomai/syscall.h>
+#include <cobalt/kernel/sched.h>
+#include <cobalt/kernel/heap.h>
+#include <cobalt/kernel/pipe.h>
+#include <pipeline/sirq.h>
+
+static int xnpipe_asyncsig = SIGIO;
+
+struct xnpipe_state xnpipe_states[XNPIPE_NDEVS];
+EXPORT_SYMBOL_GPL(xnpipe_states);
+
+#define XNPIPE_BITMAP_SIZE	((XNPIPE_NDEVS + BITS_PER_LONG - 1) / BITS_PER_LONG)
+
+static unsigned long xnpipe_bitmap[XNPIPE_BITMAP_SIZE];
+
+static LIST_HEAD(xnpipe_sleepq);
+
+static LIST_HEAD(xnpipe_asyncq);
+
+static int xnpipe_wakeup_virq;
+
+static struct class *xnpipe_class;
+
+/* Allocation of minor values */
+
+static inline int xnpipe_minor_alloc(int minor)
+{
+	spl_t s;
+
+	if ((minor < 0 && minor != XNPIPE_MINOR_AUTO) || minor >= XNPIPE_NDEVS)
+		return -ENODEV;
+
+	xnlock_get_irqsave(&nklock, s);
+
+	if (minor == XNPIPE_MINOR_AUTO)
+		minor = find_first_zero_bit(xnpipe_bitmap, XNPIPE_NDEVS);
+
+	if (minor == XNPIPE_NDEVS ||
+	    (xnpipe_bitmap[minor / BITS_PER_LONG] &
+	     (1UL << (minor % BITS_PER_LONG))))
+		minor = -EBUSY;
+	else
+		xnpipe_bitmap[minor / BITS_PER_LONG] |=
+			(1UL << (minor % BITS_PER_LONG));
+
+	xnlock_put_irqrestore(&nklock, s);
+
+	return minor;
+}
+
+static inline void xnpipe_minor_free(int minor)
+{
+	xnpipe_bitmap[minor / BITS_PER_LONG] &=
+		~(1UL << (minor % BITS_PER_LONG));
+}
+
+static inline void xnpipe_enqueue_wait(struct xnpipe_state *state, int mask)
+{
+	if (state->wcount != 0x7fffffff && state->wcount++ == 0)
+		list_add_tail(&state->slink, &xnpipe_sleepq);
+
+	state->status |= mask;
+}
+
+static inline void xnpipe_dequeue_wait(struct xnpipe_state *state, int mask)
+{
+	if (state->status & mask)
+		if (--state->wcount == 0) {
+			list_del(&state->slink);
+			state->status &= ~mask;
+		}
+}
+
+static inline void xnpipe_dequeue_all(struct xnpipe_state *state, int mask)
+{
+	if (state->status & mask) {
+		if (state->wcount) {
+			state->wcount = 0;
+			list_del(&state->slink);
+			state->status &= ~mask;
+		}
+	}
+}
+
+/* Must be entered with nklock held, interrupts off. */
+#define xnpipe_wait(__state, __mask, __s, __cond)			\
+({									\
+	wait_queue_head_t *__waitq;					\
+	DEFINE_WAIT(__wait);						\
+	int __sigpending;						\
+									\
+	if ((__mask) & XNPIPE_USER_WREAD)				\
+		__waitq = &(__state)->readq;				\
+	else								\
+		__waitq = &(__state)->syncq;				\
+									\
+	xnpipe_enqueue_wait(__state, __mask);				\
+	xnlock_put_irqrestore(&nklock, __s);				\
+									\
+	for (;;) {							\
+		__sigpending = signal_pending(current);			\
+		if (__sigpending)					\
+			break;						\
+		prepare_to_wait_exclusive(__waitq, &__wait, TASK_INTERRUPTIBLE); \
+		if (__cond || (__state)->status & XNPIPE_KERN_LCLOSE)	\
+			break;						\
+		schedule();						\
+	}								\
+									\
+	finish_wait(__waitq, &__wait);					\
+									\
+	/* Restore the interrupt state initially set by the caller. */	\
+	xnlock_get_irqsave(&nklock, __s);				\
+	xnpipe_dequeue_wait(__state, __mask);				\
+									\
+	__sigpending;							\
+})
+
+static irqreturn_t xnpipe_wakeup_proc(int sirq, void *dev_id)
+{
+	struct xnpipe_state *state;
+	unsigned long rbits;
+	spl_t s;
+
+	xnlock_get_irqsave(&nklock, s);
+
+	/*
+	 * NOTE: sleepers might enter/leave the queue while we don't
+	 * hold the nklock in these wakeup loops. So we iterate over
+	 * each sleeper list until we find no more candidate for
+	 * wakeup after an entire scan, redoing the scan from the list
+	 * head otherwise.
+	 */
+	for (;;) {
+		if (list_empty(&xnpipe_sleepq))
+			goto check_async;
+
+		state = list_first_entry(&xnpipe_sleepq, struct xnpipe_state, slink);
+
+		for (;;) {
+			rbits = state->status & XNPIPE_USER_ALL_READY;
+			if (rbits)
+				break;
+			if (list_is_last(&state->slink, &xnpipe_sleepq))
+				goto check_async;
+			state = list_next_entry(state, slink);
+		}
+
+		state->status &= ~rbits;
+
+		if ((rbits & XNPIPE_USER_WREAD_READY) != 0) {
+			if (waitqueue_active(&state->readq)) {
+				xnlock_put_irqrestore(&nklock, s);
+				wake_up_interruptible(&state->readq);
+				xnlock_get_irqsave(&nklock, s);
+			}
+		}
+		if ((rbits & XNPIPE_USER_WSYNC_READY) != 0) {
+			if (waitqueue_active(&state->syncq)) {
+				xnlock_put_irqrestore(&nklock, s);
+				wake_up_interruptible(&state->syncq);
+				xnlock_get_irqsave(&nklock, s);
+			}
+		}
+	}
+
+check_async:
+	/*
+	 * Scan the async queue, sending the proper signal to
+	 * subscribers.
+	 */
+	for (;;) {
+		if (list_empty(&xnpipe_asyncq))
+			goto out;
+
+		state = list_first_entry(&xnpipe_asyncq, struct xnpipe_state, alink);
+
+		for (;;) {
+			if (state->status & XNPIPE_USER_SIGIO)
+				break;
+			if (list_is_last(&state->alink, &xnpipe_asyncq))
+				goto out;
+			state = list_next_entry(state, alink);
+		}
+
+		state->status &= ~XNPIPE_USER_SIGIO;
+		xnlock_put_irqrestore(&nklock, s);
+		kill_fasync(&state->asyncq, xnpipe_asyncsig, POLL_IN);
+		xnlock_get_irqsave(&nklock, s);
+	}
+out:
+	xnlock_put_irqrestore(&nklock, s);
+
+	return IRQ_HANDLED;
+}
+
+static inline void xnpipe_schedule_request(void) /* hw IRQs off */
+{
+	pipeline_post_sirq(xnpipe_wakeup_virq);
+}
+
+static inline ssize_t xnpipe_flush_bufq(void (*fn)(void *buf, void *xstate),
+					struct list_head *q,
+					void *xstate)
+{
+	struct xnpipe_mh *mh, *tmp;
+	ssize_t n = 0;
+
+	if (list_empty(q))
+		return 0;
+
+	/* Queue is private, no locking is required. */
+	list_for_each_entry_safe(mh, tmp, q, link) {
+		list_del(&mh->link);
+		n += xnpipe_m_size(mh);
+		fn(mh, xstate);
+	}
+
+	/* Return the overall count of bytes flushed. */
+	return n;
+}
+
+/*
+ * Move the specified queue contents to a private queue, then call the
+ * flush handler to purge it. The latter runs without locking.
+ * Returns the number of bytes flushed. Must be entered with nklock
+ * held, interrupts off.
+ */
+#define xnpipe_flushq(__state, __q, __f, __s)				\
+({									\
+	LIST_HEAD(__privq);						\
+	ssize_t __n;							\
+									\
+	list_splice_init(&(__state)->__q, &__privq);			\
+	(__state)->nr ## __q = 0;					\
+	xnlock_put_irqrestore(&nklock, (__s));				\
+	__n = xnpipe_flush_bufq((__state)->ops.__f, &__privq, (__state)->xstate);	\
+	xnlock_get_irqsave(&nklock, (__s));				\
+									\
+	__n;								\
+})
+
+static void *xnpipe_default_alloc_ibuf(size_t size, void *xstate)
+{
+	void *buf;
+
+	buf = xnmalloc(size);
+	if (likely(buf != NULL))
+		return buf;
+
+	if (size > xnheap_get_size(&cobalt_heap))
+		/* Request will never succeed. */
+		return (struct xnpipe_mh *)-1;
+
+	return NULL;
+}
+
+static void xnpipe_default_free_ibuf(void *buf, void *xstate)
+{
+	xnfree(buf);
+}
+
+static void xnpipe_default_release(void *xstate)
+{
+}
+
+static inline int xnpipe_set_ops(struct xnpipe_state *state,
+				 struct xnpipe_operations *ops)
+{
+	state->ops = *ops;
+
+	if (ops->free_obuf == NULL)
+		/*
+		 * Caller must provide a way to free unread outgoing
+		 * buffers.
+		 */
+		return -EINVAL;
+
+	/* Set some default handlers for common usage. */
+	if (ops->alloc_ibuf == NULL)
+		state->ops.alloc_ibuf = xnpipe_default_alloc_ibuf;
+	if (ops->free_ibuf == NULL)
+		state->ops.free_ibuf = xnpipe_default_free_ibuf;
+	if (ops->release == NULL)
+		state->ops.release = xnpipe_default_release;
+
+	return 0;
+}
+
+int xnpipe_connect(int minor, struct xnpipe_operations *ops, void *xstate)
+{
+	struct xnpipe_state *state;
+	int need_sched = 0, ret;
+	spl_t s;
+
+	minor = xnpipe_minor_alloc(minor);
+	if (minor < 0)
+		return minor;
+
+	state = &xnpipe_states[minor];
+
+	xnlock_get_irqsave(&nklock, s);
+
+	ret = xnpipe_set_ops(state, ops);
+	if (ret) {
+		xnlock_put_irqrestore(&nklock, s);
+		return ret;
+	}
+
+	state->status |= XNPIPE_KERN_CONN;
+	xnsynch_init(&state->synchbase, XNSYNCH_FIFO, NULL);
+	state->xstate = xstate;
+	state->ionrd = 0;
+
+	if (state->status & XNPIPE_USER_CONN) {
+		if (state->status & XNPIPE_USER_WREAD) {
+			/*
+			 * Wake up the regular Linux task waiting for
+			 * the kernel side to connect (xnpipe_open).
+			 */
+			state->status |= XNPIPE_USER_WREAD_READY;
+			need_sched = 1;
+		}
+
+		if (state->asyncq) {	/* Schedule asynch sig. */
+			state->status |= XNPIPE_USER_SIGIO;
+			need_sched = 1;
+		}
+	}
+
+	if (need_sched)
+		xnpipe_schedule_request();
+
+	xnlock_put_irqrestore(&nklock, s);
+
+	return minor;
+}
+EXPORT_SYMBOL_GPL(xnpipe_connect);
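+
+/*
+ * Illustrative (non-normative) usage sketch with hypothetical driver
+ * names: the kernel end of a message pipe is connected by filling an
+ * xnpipe_operations descriptor -- .free_obuf is mandatory, see
+ * xnpipe_set_ops() -- then calling:
+ *
+ *	static struct xnpipe_operations mydrv_ops = {
+ *		.output = mydrv_output,
+ *		.free_obuf = mydrv_free_obuf,
+ *	};
+ *	minor = xnpipe_connect(minor, &mydrv_ops, mydrv_state);
+ *
+ * The returned minor designates the /dev/rtpN node (see
+ * xnpipe_mount()) the user-space peer should open; XNPIPE_MINOR_AUTO
+ * is assumed to request automatic minor allocation.
+ */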
+
+int xnpipe_disconnect(int minor)
+{
+	struct xnpipe_state *state;
+	int need_sched = 0;
+	spl_t s;
+
+	if (minor < 0 || minor >= XNPIPE_NDEVS)
+		return -ENODEV;
+
+	state = &xnpipe_states[minor];
+
+	xnlock_get_irqsave(&nklock, s);
+
+	if ((state->status & XNPIPE_KERN_CONN) == 0) {
+		xnlock_put_irqrestore(&nklock, s);
+		return -EBADF;
+	}
+
+	state->status &= ~XNPIPE_KERN_CONN;
+
+	state->ionrd -= xnpipe_flushq(state, outq, free_obuf, s);
+
+	if ((state->status & XNPIPE_USER_CONN) == 0)
+		goto cleanup;
+
+	xnpipe_flushq(state, inq, free_ibuf, s);
+
+	if (xnsynch_destroy(&state->synchbase) == XNSYNCH_RESCHED)
+		xnsched_run();
+
+	if (state->status & XNPIPE_USER_WREAD) {
+		/*
+		 * Wake up the regular Linux task waiting for some
+		 * operation from the Xenomai side (read/write or
+		 * poll).
+		 */
+		state->status |= XNPIPE_USER_WREAD_READY;
+		need_sched = 1;
+	}
+
+	if (state->asyncq) {	/* Schedule asynch sig. */
+		state->status |= XNPIPE_USER_SIGIO;
+		need_sched = 1;
+	}
+
+cleanup:
+	/*
+	 * If xnpipe_release() has not fully run, enter lingering
+	 * close. This will prevent the extra state from being wiped
+	 * out until then.
+	 */
+	if (state->status & XNPIPE_USER_CONN)
+		state->status |= XNPIPE_KERN_LCLOSE;
+	else {
+		xnlock_put_irqrestore(&nklock, s);
+		state->ops.release(state->xstate);
+		xnlock_get_irqsave(&nklock, s);
+		xnpipe_minor_free(minor);
+	}
+
+	if (need_sched)
+		xnpipe_schedule_request();
+
+	xnlock_put_irqrestore(&nklock, s);
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(xnpipe_disconnect);
+
+ssize_t xnpipe_send(int minor, struct xnpipe_mh *mh, size_t size, int flags)
+{
+	struct xnpipe_state *state;
+	int need_sched = 0;
+	spl_t s;
+
+	if (minor < 0 || minor >= XNPIPE_NDEVS)
+		return -ENODEV;
+
+	if (size <= sizeof(*mh))
+		return -EINVAL;
+
+	state = &xnpipe_states[minor];
+
+	xnlock_get_irqsave(&nklock, s);
+
+	if ((state->status & XNPIPE_KERN_CONN) == 0) {
+		xnlock_put_irqrestore(&nklock, s);
+		return -EBADF;
+	}
+
+	xnpipe_m_size(mh) = size - sizeof(*mh);
+	xnpipe_m_rdoff(mh) = 0;
+	state->ionrd += xnpipe_m_size(mh);
+
+	if (flags & XNPIPE_URGENT)
+		list_add(&mh->link, &state->outq);
+	else
+		list_add_tail(&mh->link, &state->outq);
+
+	state->nroutq++;
+
+	if ((state->status & XNPIPE_USER_CONN) == 0) {
+		xnlock_put_irqrestore(&nklock, s);
+		return (ssize_t) size;
+	}
+
+	if (state->status & XNPIPE_USER_WREAD) {
+		/*
+		 * Wake up the regular Linux task waiting for input
+		 * from the Xenomai side.
+		 */
+		state->status |= XNPIPE_USER_WREAD_READY;
+		need_sched = 1;
+	}
+
+	if (state->asyncq) {	/* Schedule asynch sig. */
+		state->status |= XNPIPE_USER_SIGIO;
+		need_sched = 1;
+	}
+
+	if (need_sched)
+		xnpipe_schedule_request();
+
+	xnlock_put_irqrestore(&nklock, s);
+
+	return (ssize_t) size;
+}
+EXPORT_SYMBOL_GPL(xnpipe_send);
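+
+/*
+ * Illustrative sender-side sketch (hypothetical names): the message
+ * header immediately precedes the payload and @size covers both,
+ * e.g.:
+ *
+ *	struct xnpipe_mh *mh = xnmalloc(sizeof(*mh) + len);
+ *	memcpy(xnpipe_m_data(mh), payload, len);
+ *	ret = xnpipe_send(minor, mh, sizeof(*mh) + len, 0);
+ *
+ * Passing XNPIPE_URGENT instead of 0 queues the message at the head
+ * of the output queue. Ownership of the buffer is transferred to the
+ * pipe; it is handed back through the .free_obuf handler once the
+ * user side has fully consumed it (see xnpipe_read()).
+ */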
+
+ssize_t xnpipe_mfixup(int minor, struct xnpipe_mh *mh, ssize_t size)
+{
+	struct xnpipe_state *state;
+	spl_t s;
+
+	if (minor < 0 || minor >= XNPIPE_NDEVS)
+		return -ENODEV;
+
+	if (size < 0)
+		return -EINVAL;
+
+	state = &xnpipe_states[minor];
+
+	xnlock_get_irqsave(&nklock, s);
+
+	if ((state->status & XNPIPE_KERN_CONN) == 0) {
+		xnlock_put_irqrestore(&nklock, s);
+		return -EBADF;
+	}
+
+	xnpipe_m_size(mh) += size;
+	state->ionrd += size;
+
+	xnlock_put_irqrestore(&nklock, s);
+
+	return (ssize_t) size;
+}
+EXPORT_SYMBOL_GPL(xnpipe_mfixup);
+
+ssize_t xnpipe_recv(int minor, struct xnpipe_mh **pmh, xnticks_t timeout)
+{
+	struct xnpipe_state *state;
+	struct xnpipe_mh *mh;
+	xntmode_t mode;
+	ssize_t ret;
+	int info;
+	spl_t s;
+
+	if (minor < 0 || minor >= XNPIPE_NDEVS)
+		return -ENODEV;
+
+	if (xnsched_interrupt_p())
+		return -EPERM;
+
+	state = &xnpipe_states[minor];
+
+	xnlock_get_irqsave(&nklock, s);
+
+	if ((state->status & XNPIPE_KERN_CONN) == 0) {
+		ret = -EBADF;
+		goto unlock_and_exit;
+	}
+
+	/*
+	 * If we received a relative timespec, rescale it to an
+	 * absolute time value based on the monotonic clock.
+	 */
+	mode = XN_RELATIVE;
+	if (timeout != XN_NONBLOCK && timeout != XN_INFINITE) {
+		mode = XN_ABSOLUTE;
+		timeout += xnclock_read_monotonic(&nkclock);
+	}
+
+	for (;;) {
+		if (!list_empty(&state->inq))
+			break;
+
+		if (timeout == XN_NONBLOCK) {
+			ret = -EWOULDBLOCK;
+			goto unlock_and_exit;
+		}
+
+		info = xnsynch_sleep_on(&state->synchbase, timeout, mode);
+		if (info & XNTIMEO) {
+			ret = -ETIMEDOUT;
+			goto unlock_and_exit;
+		}
+		if (info & XNBREAK) {
+			ret = -EINTR;
+			goto unlock_and_exit;
+		}
+		if (info & XNRMID) {
+			ret = -EIDRM;
+			goto unlock_and_exit;
+		}
+	}
+
+	mh = list_get_entry(&state->inq, struct xnpipe_mh, link);
+	*pmh = mh;
+	state->nrinq--;
+	ret = (ssize_t)xnpipe_m_size(mh);
+
+	if (state->status & XNPIPE_USER_WSYNC) {
+		state->status |= XNPIPE_USER_WSYNC_READY;
+		xnpipe_schedule_request();
+	}
+
+unlock_and_exit:
+
+	xnlock_put_irqrestore(&nklock, s);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(xnpipe_recv);
+
+int xnpipe_flush(int minor, int mode)
+{
+	struct xnpipe_state *state;
+	int msgcount;
+	spl_t s;
+
+	if (minor < 0 || minor >= XNPIPE_NDEVS)
+		return -ENODEV;
+
+	state = &xnpipe_states[minor];
+
+	xnlock_get_irqsave(&nklock, s);
+
+	if ((state->status & XNPIPE_KERN_CONN) == 0) {
+		xnlock_put_irqrestore(&nklock, s);
+		return -EBADF;
+	}
+
+	msgcount = state->nroutq + state->nrinq;
+
+	if (mode & XNPIPE_OFLUSH)
+		state->ionrd -= xnpipe_flushq(state, outq, free_obuf, s);
+
+	if (mode & XNPIPE_IFLUSH)
+		xnpipe_flushq(state, inq, free_ibuf, s);
+
+	if ((state->status & XNPIPE_USER_WSYNC) &&
+	    msgcount > state->nroutq + state->nrinq) {
+		state->status |= XNPIPE_USER_WSYNC_READY;
+		xnpipe_schedule_request();
+	}
+
+	xnlock_put_irqrestore(&nklock, s);
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(xnpipe_flush);
+
+int xnpipe_pollstate(int minor, unsigned int *mask_r)
+{
+	struct xnpipe_state *state;
+	int ret = 0;
+	spl_t s;
+
+	if (minor < 0 || minor >= XNPIPE_NDEVS)
+		return -ENODEV;
+
+	state = xnpipe_states + minor;
+
+	xnlock_get_irqsave(&nklock, s);
+
+	if (state->status & XNPIPE_KERN_CONN) {
+		*mask_r = POLLOUT;
+		if (!list_empty(&state->inq))
+			*mask_r |= POLLIN;
+	} else
+		ret = -EIO;
+
+	xnlock_put_irqrestore(&nklock, s);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(xnpipe_pollstate);
+
+/* Must be entered with nklock held, interrupts off. */
+#define xnpipe_cleanup_user_conn(__state, __s)				\
+	do {								\
+		xnpipe_flushq((__state), outq, free_obuf, (__s));	\
+		xnpipe_flushq((__state), inq, free_ibuf, (__s));	\
+		(__state)->status &= ~XNPIPE_USER_CONN;			\
+		if ((__state)->status & XNPIPE_KERN_LCLOSE) {		\
+			(__state)->status &= ~XNPIPE_KERN_LCLOSE;	\
+			xnlock_put_irqrestore(&nklock, (__s));		\
+			(__state)->ops.release((__state)->xstate);	\
+			xnlock_get_irqsave(&nklock, (__s));		\
+			xnpipe_minor_free(xnminor_from_state(__state));	\
+		}							\
+	} while(0)
+
+/*
+ * Open the pipe from user-space.
+ */
+
+static int xnpipe_open(struct inode *inode, struct file *file)
+{
+	int minor, err = 0, sigpending;
+	struct xnpipe_state *state;
+	spl_t s;
+
+	minor = MINOR(inode->i_rdev);
+
+	if (minor >= XNPIPE_NDEVS)
+		return -ENXIO;	/* TssTss... stop playing with mknod() ;o) */
+
+	state = &xnpipe_states[minor];
+
+	xnlock_get_irqsave(&nklock, s);
+
+	/* Enforce exclusive open for the message queues. */
+	if (state->status & (XNPIPE_USER_CONN | XNPIPE_USER_LCONN)) {
+		xnlock_put_irqrestore(&nklock, s);
+		return -EBUSY;
+	}
+
+	state->status |= XNPIPE_USER_LCONN;
+
+	xnlock_put_irqrestore(&nklock, s);
+
+	file->private_data = state;
+	init_waitqueue_head(&state->readq);
+	init_waitqueue_head(&state->syncq);
+
+	xnlock_get_irqsave(&nklock, s);
+
+	state->status |= XNPIPE_USER_CONN;
+	state->status &= ~XNPIPE_USER_LCONN;
+	state->wcount = 0;
+
+	state->status &=
+		~(XNPIPE_USER_ALL_WAIT | XNPIPE_USER_ALL_READY |
+		  XNPIPE_USER_SIGIO);
+
+	if ((state->status & XNPIPE_KERN_CONN) == 0) {
+		if (file->f_flags & O_NONBLOCK) {
+			xnpipe_cleanup_user_conn(state, s);
+			xnlock_put_irqrestore(&nklock, s);
+			return -EWOULDBLOCK;
+		}
+
+		sigpending = xnpipe_wait(state, XNPIPE_USER_WREAD, s,
+					 state->status & XNPIPE_KERN_CONN);
+		if (sigpending) {
+			xnpipe_cleanup_user_conn(state, s);
+			xnlock_put_irqrestore(&nklock, s);
+			return -ERESTARTSYS;
+		}
+	}
+
+	if (err)
+		xnpipe_cleanup_user_conn(state, s);
+
+	xnlock_put_irqrestore(&nklock, s);
+
+	return err;
+}
+
+static int xnpipe_release(struct inode *inode, struct file *file)
+{
+	struct xnpipe_state *state = file->private_data;
+	spl_t s;
+
+	xnlock_get_irqsave(&nklock, s);
+
+	xnpipe_dequeue_all(state, XNPIPE_USER_WREAD);
+	xnpipe_dequeue_all(state, XNPIPE_USER_WSYNC);
+
+	if (state->status & XNPIPE_KERN_CONN) {
+		/* Unblock waiters. */
+		if (xnsynch_pended_p(&state->synchbase)) {
+			xnsynch_flush(&state->synchbase, XNRMID);
+			xnsched_run();
+		}
+	}
+
+	if (state->ops.input)
+		state->ops.input(NULL, -EPIPE, state->xstate);
+
+	if (state->asyncq) {	/* Clear the async queue */
+		list_del(&state->alink);
+		state->status &= ~XNPIPE_USER_SIGIO;
+		xnlock_put_irqrestore(&nklock, s);
+		fasync_helper(-1, file, 0, &state->asyncq);
+		xnlock_get_irqsave(&nklock, s);
+	}
+
+	xnpipe_cleanup_user_conn(state, s);
+	/*
+	 * The extra state may not be available from now on, if
+	 * xnpipe_disconnect() entered lingering close before we got
+	 * there; so calling xnpipe_cleanup_user_conn() should be the
+	 * last thing we do.
+	 */
+	xnlock_put_irqrestore(&nklock, s);
+
+	return 0;
+}
+
+static ssize_t xnpipe_read(struct file *file,
+			   char *buf, size_t count, loff_t *ppos)
+{
+	struct xnpipe_state *state = file->private_data;
+	int sigpending, err = 0;
+	size_t nbytes, inbytes;
+	struct xnpipe_mh *mh;
+	ssize_t ret;
+	spl_t s;
+
+	if (!access_wok(buf, count))
+		return -EFAULT;
+
+	xnlock_get_irqsave(&nklock, s);
+
+	if ((state->status & XNPIPE_KERN_CONN) == 0) {
+		xnlock_put_irqrestore(&nklock, s);
+		return -EPIPE;
+	}
+	/*
+	 * The queue probe and the enqueuing of the waiting process
+	 * must be seen atomically, including from the Xenomai side.
+	 */
+	if (list_empty(&state->outq)) {
+		if (file->f_flags & O_NONBLOCK) {
+			xnlock_put_irqrestore(&nklock, s);
+			return -EWOULDBLOCK;
+		}
+
+		sigpending = xnpipe_wait(state, XNPIPE_USER_WREAD, s,
+					 !list_empty(&state->outq));
+
+		if (list_empty(&state->outq)) {
+			xnlock_put_irqrestore(&nklock, s);
+			return sigpending ? -ERESTARTSYS : 0;
+		}
+	}
+
+	mh = list_get_entry(&state->outq, struct xnpipe_mh, link);
+	state->nroutq--;
+
+	/*
+	 * We allow more data to be appended to the current message
+	 * bucket while its contents are being copied to the user
+	 * buffer; therefore, we need to loop until either 1) all the
+	 * data has been copied, or 2) the user buffer space has been
+	 * consumed entirely.
+	 */
+
+	inbytes = 0;
+
+	for (;;) {
+		nbytes = xnpipe_m_size(mh) - xnpipe_m_rdoff(mh);
+
+		if (nbytes + inbytes > count)
+			nbytes = count - inbytes;
+
+		if (nbytes == 0)
+			break;
+
+		xnlock_put_irqrestore(&nklock, s);
+
+		/* More data could be appended while doing this: */
+		err = __copy_to_user(buf + inbytes,
+				     xnpipe_m_data(mh) + xnpipe_m_rdoff(mh),
+				     nbytes);
+
+		xnlock_get_irqsave(&nklock, s);
+
+		if (err) {
+			err = -EFAULT;
+			break;
+		}
+
+		inbytes += nbytes;
+		xnpipe_m_rdoff(mh) += nbytes;
+	}
+
+	state->ionrd -= inbytes;
+	ret = inbytes;
+
+	if (xnpipe_m_size(mh) > xnpipe_m_rdoff(mh)) {
+		list_add(&mh->link, &state->outq);
+		state->nroutq++;
+	} else {
+		/*
+		 * We always want to fire the output handler because
+		 * whatever the error state is for userland (e.g.
+		 * -EFAULT), we did pull a message from our output
+		 * queue.
+		 */
+		if (state->ops.output)
+			state->ops.output(mh, state->xstate);
+		xnlock_put_irqrestore(&nklock, s);
+		state->ops.free_obuf(mh, state->xstate);
+		xnlock_get_irqsave(&nklock, s);
+		if (state->status & XNPIPE_USER_WSYNC) {
+			state->status |= XNPIPE_USER_WSYNC_READY;
+			xnpipe_schedule_request();
+		}
+	}
+
+	xnlock_put_irqrestore(&nklock, s);
+
+	return err ? : ret;
+}
+
+static ssize_t xnpipe_write(struct file *file,
+			    const char *buf, size_t count, loff_t *ppos)
+{
+	struct xnpipe_state *state = file->private_data;
+	struct xnpipe_mh *mh;
+	int pollnum, ret;
+	spl_t s;
+
+	if (count == 0)
+		return 0;
+
+	if (!access_rok(buf, count))
+		return -EFAULT;
+
+	xnlock_get_irqsave(&nklock, s);
+
+retry:
+	if ((state->status & XNPIPE_KERN_CONN) == 0) {
+		xnlock_put_irqrestore(&nklock, s);
+		return -EPIPE;
+	}
+
+	pollnum = state->nrinq + state->nroutq;
+	xnlock_put_irqrestore(&nklock, s);
+
+	mh = state->ops.alloc_ibuf(count + sizeof(*mh), state->xstate);
+	if (mh == (struct xnpipe_mh *)-1)
+		return -ENOMEM;
+
+	if (mh == NULL) {
+		if (file->f_flags & O_NONBLOCK)
+			return -EWOULDBLOCK;
+
+		xnlock_get_irqsave(&nklock, s);
+		if (xnpipe_wait(state, XNPIPE_USER_WSYNC, s,
+				pollnum > state->nrinq + state->nroutq)) {
+			xnlock_put_irqrestore(&nklock, s);
+			return -ERESTARTSYS;
+		}
+		goto retry;
+	}
+
+	xnpipe_m_size(mh) = count;
+	xnpipe_m_rdoff(mh) = 0;
+
+	if (copy_from_user(xnpipe_m_data(mh), buf, count)) {
+		state->ops.free_ibuf(mh, state->xstate);
+		return -EFAULT;
+	}
+
+	xnlock_get_irqsave(&nklock, s);
+
+	list_add_tail(&mh->link, &state->inq);
+	state->nrinq++;
+
+	/* Wake up a Xenomai sleeper if any. */
+	if (xnsynch_wakeup_one_sleeper(&state->synchbase))
+		xnsched_run();
+
+	if (state->ops.input) {
+		ret = state->ops.input(mh, 0, state->xstate);
+		if (ret)
+			count = (size_t)ret;
+	}
+
+	if (file->f_flags & O_SYNC) {
+		if (!list_empty(&state->inq)) {
+			if (xnpipe_wait(state, XNPIPE_USER_WSYNC, s,
+					list_empty(&state->inq)))
+				count = -ERESTARTSYS;
+		}
+	}
+
+	xnlock_put_irqrestore(&nklock, s);
+
+	return (ssize_t)count;
+}
+
+static long xnpipe_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+{
+	struct xnpipe_state *state = file->private_data;
+	int ret = 0;
+	ssize_t n;
+	spl_t s;
+
+	switch (cmd) {
+	case XNPIPEIOC_GET_NRDEV:
+
+		if (put_user(XNPIPE_NDEVS, (int *)arg))
+			return -EFAULT;
+
+		break;
+
+	case XNPIPEIOC_OFLUSH:
+
+		xnlock_get_irqsave(&nklock, s);
+
+		if ((state->status & XNPIPE_KERN_CONN) == 0) {
+			xnlock_put_irqrestore(&nklock, s);
+			return -EPIPE;
+		}
+
+		n = xnpipe_flushq(state, outq, free_obuf, s);
+		state->ionrd -= n;
+		goto kick_wsync;
+
+	case XNPIPEIOC_IFLUSH:
+
+		xnlock_get_irqsave(&nklock, s);
+
+		if ((state->status & XNPIPE_KERN_CONN) == 0) {
+			xnlock_put_irqrestore(&nklock, s);
+			return -EPIPE;
+		}
+
+		n = xnpipe_flushq(state, inq, free_ibuf, s);
+
+	kick_wsync:
+
+		if (n > 0 && (state->status & XNPIPE_USER_WSYNC)) {
+			state->status |= XNPIPE_USER_WSYNC_READY;
+			xnpipe_schedule_request();
+		}
+
+		xnlock_put_irqrestore(&nklock, s);
+		ret = n;
+		break;
+
+	case XNPIPEIOC_SETSIG:
+
+		if (arg < 1 || arg >= _NSIG)
+			return -EINVAL;
+
+		xnpipe_asyncsig = arg;
+		break;
+
+	case FIONREAD:
+
+		n = (state->status & XNPIPE_KERN_CONN) ? state->ionrd : 0;
+
+		if (put_user(n, (int *)arg))
+			return -EFAULT;
+
+		break;
+
+	case TCGETS:
+		/* For isatty() probing. */
+		return -ENOTTY;
+
+	default:
+
+		return -EINVAL;
+	}
+
+	return ret;
+}
+
+#ifdef CONFIG_COMPAT
+/*
+ * Could be replaced with compat_ptr_ioctl if support for kernels < 5.4 is
+ * dropped.
+ */
+static long xnpipe_compat_ioctl(struct file *file, unsigned int cmd,
+				unsigned long arg)
+{
+	return xnpipe_ioctl(file, cmd, (unsigned long)compat_ptr(arg));
+}
+#else
+#define xnpipe_compat_ioctl	NULL
+#endif
+
+static int xnpipe_fasync(int fd, struct file *file, int on)
+{
+	struct xnpipe_state *state = file->private_data;
+	int ret, queued;
+	spl_t s;
+
+	queued = (state->asyncq != NULL);
+	ret = fasync_helper(fd, file, on, &state->asyncq);
+
+	if (state->asyncq) {
+		if (!queued) {
+			xnlock_get_irqsave(&nklock, s);
+			list_add_tail(&state->alink, &xnpipe_asyncq);
+			xnlock_put_irqrestore(&nklock, s);
+		}
+	} else if (queued) {
+		xnlock_get_irqsave(&nklock, s);
+		list_del(&state->alink);
+		xnlock_put_irqrestore(&nklock, s);
+	}
+
+	return ret;
+}
+
+static unsigned xnpipe_poll(struct file *file, poll_table *pt)
+{
+	struct xnpipe_state *state = file->private_data;
+	unsigned r_mask = 0, w_mask = 0;
+	spl_t s;
+
+	poll_wait(file, &state->readq, pt);
+
+	xnlock_get_irqsave(&nklock, s);
+
+	if (state->status & XNPIPE_KERN_CONN)
+		w_mask |= (POLLOUT | POLLWRNORM);
+	else
+		r_mask |= POLLHUP;
+
+	if (!list_empty(&state->outq))
+		r_mask |= (POLLIN | POLLRDNORM);
+	else
+		/*
+		 * Processes which issued a poll request that timed out
+		 * remain linked to the sleepers queue, and will be
+		 * silently unlinked the next time the Xenomai side
+		 * kicks xnpipe_wakeup_proc().
+		 */
+		xnpipe_enqueue_wait(state, XNPIPE_USER_WREAD);
+
+	xnlock_put_irqrestore(&nklock, s);
+
+	return r_mask | w_mask;
+}
+
+static struct file_operations xnpipe_fops = {
+	.read = xnpipe_read,
+	.write = xnpipe_write,
+	.poll = xnpipe_poll,
+	.unlocked_ioctl = xnpipe_ioctl,
+	.compat_ioctl = xnpipe_compat_ioctl,
+	.open = xnpipe_open,
+	.release = xnpipe_release,
+	.fasync = xnpipe_fasync
+};
+
+int xnpipe_mount(void)
+{
+	struct xnpipe_state *state;
+	struct device *cldev;
+	int i;
+
+	for (state = &xnpipe_states[0];
+	     state < &xnpipe_states[XNPIPE_NDEVS]; state++) {
+		state->status = 0;
+		state->asyncq = NULL;
+		INIT_LIST_HEAD(&state->inq);
+		state->nrinq = 0;
+		INIT_LIST_HEAD(&state->outq);
+		state->nroutq = 0;
+	}
+
+	xnpipe_class = class_create(THIS_MODULE, "rtpipe");
+	if (IS_ERR(xnpipe_class)) {
+		printk(XENO_ERR "error creating rtpipe class, err=%ld\n",
+		       PTR_ERR(xnpipe_class));
+		return -EBUSY;
+	}
+
+	for (i = 0; i < XNPIPE_NDEVS; i++) {
+		cldev = device_create(xnpipe_class, NULL,
+				      MKDEV(XNPIPE_DEV_MAJOR, i),
+				      NULL, "rtp%d", i);
+		if (IS_ERR(cldev)) {
+			printk(XENO_ERR
+			       "can't add device class, major=%d, minor=%d, err=%ld\n",
+			       XNPIPE_DEV_MAJOR, i, PTR_ERR(cldev));
+			class_destroy(xnpipe_class);
+			return -EBUSY;
+		}
+	}
+
+	if (register_chrdev(XNPIPE_DEV_MAJOR, "rtpipe", &xnpipe_fops)) {
+		printk(XENO_ERR
+		       "unable to reserve major #%d for message pipes\n",
+		       XNPIPE_DEV_MAJOR);
+		return -EBUSY;
+	}
+
+	xnpipe_wakeup_virq = pipeline_create_inband_sirq(xnpipe_wakeup_proc);
+	if (xnpipe_wakeup_virq < 0) {
+		printk(XENO_ERR
+		       "unable to reserve synthetic IRQ for message pipes\n");
+		return xnpipe_wakeup_virq;
+	}
+
+	return 0;
+}
+
+void xnpipe_umount(void)
+{
+	int i;
+
+	pipeline_delete_inband_sirq(xnpipe_wakeup_virq);
+
+	unregister_chrdev(XNPIPE_DEV_MAJOR, "rtpipe");
+
+	for (i = 0; i < XNPIPE_NDEVS; i++)
+		device_destroy(xnpipe_class, MKDEV(XNPIPE_DEV_MAJOR, i));
+
+	class_destroy(xnpipe_class);
+}
+++ linux-patched/kernel/xenomai/debug.c	2022-03-21 12:58:28.731895378 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/kernel/xenomai/timer.c	1970-01-01 01:00:00.000000000 +0100
+/*
+ * Copyright (C) 2010 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * Xenomai is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published
+ * by the Free Software Foundation; either version 2 of the License,
+ * or (at your option) any later version.
+ *
+ * Xenomai is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Xenomai; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA
+ * 02111-1307, USA.
+ */
+#include <linux/types.h>
+#include <linux/limits.h>
+#include <linux/ctype.h>
+#include <linux/jhash.h>
+#include <linux/mm.h>
+#include <linux/signal.h>
+#include <linux/vmalloc.h>
+#include <cobalt/kernel/sched.h>
+#include <cobalt/kernel/heap.h>
+#include <cobalt/kernel/clock.h>
+#include <cobalt/kernel/ppd.h>
+#include <cobalt/uapi/signal.h>
+#include <asm/xenomai/syscall.h>
+#include "posix/process.h"
+#include "debug.h"
+
+/**
+ * @ingroup cobalt_core
+ * @defgroup cobalt_core_debug Debugging services
+ * @{
+ */
+struct xnvfile_directory cobalt_debug_vfroot;
+EXPORT_SYMBOL_GPL(cobalt_debug_vfroot);
+
+#ifdef CONFIG_XENO_OPT_DEBUG_TRACE_RELAX
+
+#define SYMBOL_HSLOTS	(1 << 8)
+
+struct hashed_symbol {
+	struct hashed_symbol *next;
+	char symbol[0];
+};
+
+static struct hashed_symbol *symbol_jhash[SYMBOL_HSLOTS];
+
+static struct xnheap memory_pool;
+
+/*
+ * This is permanent storage for ASCII strings which comes in handy to
+ * get a unique and constant reference to a symbol while preserving
+ * storage space. Hashed symbols have infinite lifetime and are never
+ * flushed.
+ */
+DEFINE_PRIVATE_XNLOCK(symbol_lock);
+
+static const char *hash_symbol(const char *symbol)
+{
+	struct hashed_symbol *p, **h;
+	const char *str;
+	size_t len;
+	u32 hash;
+	spl_t s;
+
+	len = strlen(symbol);
+	hash = jhash(symbol, len, 0);
+
+	xnlock_get_irqsave(&symbol_lock, s);
+
+	h = &symbol_jhash[hash & (SYMBOL_HSLOTS - 1)];
+	p = *h;
+	while (p &&
+	       (*p->symbol != *symbol ||
+		strcmp(p->symbol + 1, symbol + 1)))
+	       p = p->next;
+
+	if (p)
+		goto done;
+
+	p = xnheap_alloc(&memory_pool, sizeof(*p) + len + 1);
+	if (p == NULL) {
+		str = NULL;
+		goto out;
+	}
+
+	strcpy(p->symbol, symbol);
+	p->next = *h;
+	*h = p;
+done:
+	str = p->symbol;
+out:
+	xnlock_put_irqrestore(&symbol_lock, s);
+
+	return str;
+}
+
+/*
+ * We define a static limit (RELAX_SPOTNR) for spot records to limit
+ * the memory consumption (we pull record memory from the system
+ * heap). The current value should be reasonable enough unless the
+ * application is extremely unsane, given that we only keep unique
+ * spots. Said differently, if the application has more than
+ * RELAX_SPOTNR distinct code locations doing spurious relaxes, then
+ * the first issue to address is likely PEBKAC.
+ */
+#define RELAX_SPOTNR	128
+#define RELAX_HSLOTS	(1 << 8)
+
+struct relax_record {
+	/* Number of hits for this location */
+	u32 hits;
+	struct relax_spot {
+		/* Faulty thread name. */
+		char thread[XNOBJECT_NAME_LEN];
+		/* call stack the relax originates from. */
+		int depth;
+		struct backtrace {
+			unsigned long pc;
+			const char *mapname;
+		} backtrace[SIGSHADOW_BACKTRACE_DEPTH];
+		/* Program hash value of the caller. */
+		u32 proghash;
+		/* Pid of the caller. */
+		pid_t pid;
+		/* Reason for relaxing. */
+		int reason;
+	} spot;
+	struct relax_record *r_next;
+	struct relax_record *h_next;
+	const char *exe_path;
+};
+
+static struct relax_record *relax_jhash[RELAX_HSLOTS];
+
+static struct relax_record *relax_record_list;
+
+static int relax_overall, relax_queued;
+
+DEFINE_PRIVATE_XNLOCK(relax_lock);
+
+/*
+ * The motivation to centralize tracing information about relaxes
+ * directly into kernel space is fourfold:
+ *
+ * - this allows gathering all the trace data into a single location
+ * and keep it safe there, with no external log file involved.
+ *
+ * - enabling the tracing does not impose any requirement on the
+ * application (aside from being compiled with debug symbols to best
+ * interpret that information). We only need a kernel config switch
+ * for this (i.e. CONFIG_XENO_OPT_DEBUG_TRACE_RELAX).
+ *
+ * - the data is collected and can be made available exactly the same
+ * way regardless of the application emitting the relax requests, or
+ * whether it is still alive when the trace data are displayed.
+ *
+ * - the kernel is able to provide accurate and detailed trace
+ * information, such as the relative offset of instructions causing
+ * relax requests within dynamic shared objects, without having to
+ * guess it roughly from /proc/pid/maps, or to rely on ldd's
+ * --function-relocs feature, which both require running on the target
+ * system to get the needed information. Instead, we allow a build
+ * host to use a cross-compilation toolchain later to extract the
+ * source location, from the raw data the kernel has provided on the
+ * target system.
+ *
+ * However, collecting the call frames within the application to
+ * determine the full context of a relax spot is not something we can
+ * do purely from kernel space, notably because it depends on build
+ * options we just don't know about (e.g. frame pointers availability
+ * for the app, or other nitty-gritty details depending on the
+ * toolchain). To solve this, we ask the application to send us a
+ * complete backtrace taken from the context of a specific signal
+ * handler, which we know is stacked over the relax spot. That
+ * information is then stored by the kernel after some
+ * post-processing, along with other data identifying the caller, and
+ * made available through the /proc/xenomai/debug/relax vfile.
+ *
+ * Implementation-wise, xndebug_notify_relax and xndebug_trace_relax
+ * routines are paired: first, xndebug_notify_relax sends a SIGSHADOW
+ * request to userland when a relax spot is detected from
+ * xnthread_relax, which should then trigger a call back to
+ * xndebug_trace_relax with the complete backtrace information, as
+ * seen from userland (via the internal sc_cobalt_backtrace
+ * syscall). All this runs on behalf of the relaxing thread, so we can
+ * make a number of convenient assumptions (such as being able to scan
+ * the current vma list to get detailed information about the
+ * executable mappings that could be involved).
+ */
+
+void xndebug_notify_relax(struct xnthread *thread, int reason)
+{
+	xnthread_signal(thread, SIGSHADOW,
+			  sigshadow_int(SIGSHADOW_ACTION_BACKTRACE, reason));
+}
+
+void xndebug_trace_relax(int nr, unsigned long *backtrace,
+			 int reason)
+{
+	struct relax_record *p, **h;
+	struct vm_area_struct *vma;
+	struct xnthread *thread;
+	struct relax_spot spot;
+	struct mm_struct *mm;
+	struct file *file;
+	unsigned long pc;
+	char *mapname;
+	int n, depth;
+	char *tmp;
+	u32 hash;
+	spl_t s;
+
+	thread = xnthread_current();
+	if (thread == NULL)
+		return;		/* Can't be, right? What a mess. */
+
+	/*
+	 * We compute PC values relative to the base of the shared
+	 * executable mappings we find in the backtrace, which makes
+	 * it possible for the slackspot utility to match the
+	 * corresponding source code locations from unrelocated file
+	 * offsets.
+	 */
+
+	tmp = (char *)__get_free_page(GFP_KERNEL);
+	if (tmp == NULL)
+		/*
+		 * The situation looks really bad, but we can't do
+		 * anything about it. Just bail out.
+		 */
+		return;
+
+	memset(&spot, 0, sizeof(spot));
+	mm = get_task_mm(current);
+	mmap_read_lock(mm);
+
+	for (n = 0, depth = 0; n < nr; n++) {
+		pc = backtrace[n];
+
+		vma = find_vma(mm, pc);
+		if (vma == NULL)
+			continue;
+
+		/*
+		 * Hack. Unlike DSOs, executables and interpreters
+		 * (e.g. dynamic linkers) are protected against write
+		 * attempts. Use this to determine when $pc should be
+		 * fixed up by subtracting the mapping base address in
+		 * the DSO case.
+		 */
+		if (!(vma->vm_flags & VM_DENYWRITE))
+			pc -= vma->vm_start;
+
+		spot.backtrace[depth].pc = pc;
+
+		/*
+		 * Even in case we can't fetch the map name, we still
+		 * record the PC value, which may still give some hint
+		 * downstream.
+		 */
+		file = vma->vm_file;
+		if (file == NULL)
+			goto next_frame;
+
+		mapname = d_path(&file->f_path, tmp, PAGE_SIZE);
+		if (IS_ERR(mapname))
+			goto next_frame;
+
+		spot.backtrace[depth].mapname = hash_symbol(mapname);
+	next_frame:
+		depth++;
+	}
+
+	mmap_read_unlock(mm);
+	mmput(mm);
+	free_page((unsigned long)tmp);
+
+	/*
+	 * Most of the time we will be sent duplicates, since the odds
+	 * of seeing the same thread running the same code making the
+	 * same mistake all over again are high. So we probe the hash
+	 * table for an identical spot first, before going for a
+	 * complete record allocation from the system heap if no match
+	 * was found. Otherwise, we just take the fast exit path.
+	 */
+	spot.depth = depth;
+	spot.proghash = thread->proghash;
+	spot.pid = xnthread_host_pid(thread);
+	spot.reason = reason;
+	strcpy(spot.thread, thread->name);
+	hash = jhash2((u32 *)&spot, sizeof(spot) / sizeof(u32), 0);
+
+	xnlock_get_irqsave(&relax_lock, s);
+
+	h = &relax_jhash[hash & (RELAX_HSLOTS - 1)];
+	p = *h;
+	while (p &&
+	       /* Try quick guesses first, then memcmp */
+	       (p->spot.depth != spot.depth ||
+		p->spot.pid != spot.pid ||
+		memcmp(&p->spot, &spot, sizeof(spot))))
+	       p = p->h_next;
+
+	if (p) {
+		p->hits++;
+		goto out;	/* Spot already recorded. */
+	}
+
+	if (relax_queued >= RELAX_SPOTNR)
+		goto out;	/* No more space -- ignore. */
+	/*
+	 * We can only compete with other shadows which have just
+	 * switched to secondary mode like us. So holding the
+	 * relax_lock a bit more without disabling interrupts is not
+	 * an issue. This allows us to postpone the record memory
+	 * allocation while probing and updating the hash table in a
+	 * single move.
+	 */
+	p = xnheap_alloc(&memory_pool, sizeof(*p));
+	if (p == NULL)
+		goto out;      /* Something is about to go wrong... */
+
+	memcpy(&p->spot, &spot, sizeof(p->spot));
+	p->exe_path = hash_symbol(thread->exe_path);
+	p->hits = 1;
+	p->h_next = *h;
+	*h = p;
+	p->r_next = relax_record_list;
+	relax_record_list = p;
+	relax_queued++;
+out:
+	relax_overall++;
+
+	xnlock_put_irqrestore(&relax_lock, s);
+}
+
+static DEFINE_VFILE_HOSTLOCK(relax_mutex);
+
+struct relax_vfile_priv {
+	int queued;
+	int overall;
+	int ncurr;
+	struct relax_record *head;
+	struct relax_record *curr;
+};
+
+static void *relax_vfile_begin(struct xnvfile_regular_iterator *it)
+{
+	struct relax_vfile_priv *priv = xnvfile_iterator_priv(it);
+	struct relax_record *p;
+	spl_t s;
+	int n;
+
+	/*
+	 * Snapshot the counters under lock, to make sure they remain
+	 * mutually consistent even though we dump the record list in
+	 * a lock-less manner. Additionally, the vfile layer already
+	 * holds the relax_mutex lock for us, so that we can't race
+	 * with ->store().
+	 */
+	xnlock_get_irqsave(&relax_lock, s);
+
+	if (relax_queued == 0 || it->pos > relax_queued) {
+		xnlock_put_irqrestore(&relax_lock, s);
+		return NULL;
+	}
+	priv->overall = relax_overall;
+	priv->queued = relax_queued;
+	priv->head = relax_record_list;
+
+	xnlock_put_irqrestore(&relax_lock, s);
+
+	if (it->pos == 0) {
+		priv->curr = NULL;
+		priv->ncurr = -1;
+		return VFILE_SEQ_START;
+	}
+
+	for (n = 1, p = priv->head; n < it->pos; n++)
+		p = p->r_next;
+
+	priv->curr = p;
+	priv->ncurr = n;
+
+	return p;
+}
+
+static void *relax_vfile_next(struct xnvfile_regular_iterator *it)
+{
+	struct relax_vfile_priv *priv = xnvfile_iterator_priv(it);
+	struct relax_record *p;
+	int n;
+
+	if (it->pos > priv->queued)
+		return NULL;
+
+	if (it->pos == priv->ncurr + 1)
+		p = priv->curr->r_next;
+	else {
+		for (n = 1, p = priv->head; n < it->pos; n++)
+			p = p->r_next;
+	}
+
+	priv->curr = p;
+	priv->ncurr = it->pos;
+
+	return p;
+}
+
+static const char *reason_str[] = {
+    [SIGDEBUG_UNDEFINED] = "undefined",
+    [SIGDEBUG_MIGRATE_SIGNAL] = "signal",
+    [SIGDEBUG_MIGRATE_SYSCALL] = "syscall",
+    [SIGDEBUG_MIGRATE_FAULT] = "fault",
+    [SIGDEBUG_MIGRATE_PRIOINV] = "pi-error",
+    [SIGDEBUG_NOMLOCK] = "mlock-check",
+    [SIGDEBUG_WATCHDOG] = "runaway-break",
+    [SIGDEBUG_RESCNT_IMBALANCE] = "resource-count-imbalance",
+    [SIGDEBUG_MUTEX_SLEEP] = "sleep-holding-mutex",
+    [SIGDEBUG_LOCK_BREAK] = "scheduler-lock-break",
+};
+
+static int relax_vfile_show(struct xnvfile_regular_iterator *it, void *data)
+{
+	struct relax_vfile_priv *priv = xnvfile_iterator_priv(it);
+	struct relax_record *p = data;
+	int n;
+
+	/*
+	 * No need to grab any lock to read a record from a previously
+	 * validated index: the data must be there and won't be
+	 * touched anymore.
+	 */
+	if (p == NULL) {
+		xnvfile_printf(it, "%d\n", priv->overall);
+		return 0;
+	}
+
+	xnvfile_printf(it, "%s\n", p->exe_path ?: "?");
+	xnvfile_printf(it, "%d %d %s %s\n", p->spot.pid, p->hits,
+		       reason_str[p->spot.reason], p->spot.thread);
+
+	for (n = 0; n < p->spot.depth; n++)
+		xnvfile_printf(it, "0x%lx %s\n",
+			       p->spot.backtrace[n].pc,
+			       p->spot.backtrace[n].mapname ?: "?");
+
+	xnvfile_printf(it, ".\n");
+
+	return 0;
+}
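+
+/*
+ * For reference, a dump of /proc/xenomai/debug/relax produced by the
+ * ->show() handler above roughly looks like (illustrative values):
+ *
+ *	<overall relax count>
+ *	/usr/bin/someapp
+ *	<pid> <hits> syscall <thread-name>
+ *	0x2f40 /usr/lib/libfoo.so
+ *	0x1c88 ?
+ *	.
+ */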
+
+static ssize_t relax_vfile_store(struct xnvfile_input *input)
+{
+	struct relax_record *p, *np;
+	spl_t s;
+
+	/*
+	 * Flush out all records. Races with ->show() are prevented
+	 * using the relax_mutex lock. The vfile layer takes care of
+	 * this internally.
+	 */
+	xnlock_get_irqsave(&relax_lock, s);
+	p = relax_record_list;
+	relax_record_list = NULL;
+	relax_overall = 0;
+	relax_queued = 0;
+	memset(relax_jhash, 0, sizeof(relax_jhash));
+	xnlock_put_irqrestore(&relax_lock, s);
+
+	while (p) {
+		np = p->r_next;
+		xnheap_free(&memory_pool, p);
+		p = np;
+	}
+
+	return input->size;
+}
+
+static struct xnvfile_regular_ops relax_vfile_ops = {
+	.begin = relax_vfile_begin,
+	.next = relax_vfile_next,
+	.show = relax_vfile_show,
+	.store = relax_vfile_store,
+};
+
+static struct xnvfile_regular relax_vfile = {
+	.privsz = sizeof(struct relax_vfile_priv),
+	.ops = &relax_vfile_ops,
+	.entry = { .lockops = &relax_mutex.ops },
+};
+
+static inline int init_trace_relax(void)
+{
+	u32 size = CONFIG_XENO_OPT_DEBUG_TRACE_LOGSZ * 1024;
+	void *p;
+	int ret;
+
+	p = vmalloc(size);
+	if (p == NULL)
+		return -ENOMEM;
+
+	ret = xnheap_init(&memory_pool, p, size);
+	if (ret) {
+		vfree(p);
+		return ret;
+	}
+
+	xnheap_set_name(&memory_pool, "debug log");
+
+	ret = xnvfile_init_regular("relax", &relax_vfile, &cobalt_debug_vfroot);
+	if (ret) {
+		xnheap_destroy(&memory_pool);
+		vfree(p);
+	}
+
+	return ret;
+}
+
+static inline void cleanup_trace_relax(void)
+{
+	void *p;
+
+	xnvfile_destroy_regular(&relax_vfile);
+	p = xnheap_get_membase(&memory_pool);
+	xnheap_destroy(&memory_pool);
+	vfree(p);
+}
+
+#else /* !CONFIG_XENO_OPT_DEBUG_TRACE_RELAX */
+
+static inline int init_trace_relax(void)
+{
+	return 0;
+}
+
+static inline void cleanup_trace_relax(void)
+{
+}
+
+static inline void init_thread_relax_trace(struct xnthread *thread)
+{
+}
+
+#endif /* !XENO_OPT_DEBUG_TRACE_RELAX */
+
+#ifdef CONFIG_XENO_OPT_DEBUG_LOCKING
+
+void xnlock_dbg_prepare_acquire(unsigned long long *start)
+{
+	*start = xnclock_read_raw(&nkclock);
+}
+EXPORT_SYMBOL_GPL(xnlock_dbg_prepare_acquire);
+
+void xnlock_dbg_acquired(struct xnlock *lock, int cpu, unsigned long long *start,
+			 const char *file, int line, const char *function)
+{
+	lock->lock_date = *start;
+	lock->spin_time = xnclock_read_raw(&nkclock) - *start;
+	lock->file = file;
+	lock->function = function;
+	lock->line = line;
+	lock->cpu = cpu;
+}
+EXPORT_SYMBOL_GPL(xnlock_dbg_acquired);
+
+int xnlock_dbg_release(struct xnlock *lock,
+		       const char *file, int line, const char *function)
+{
+	unsigned long long lock_time;
+	struct xnlockinfo *stats;
+	int cpu;
+
+	lock_time = xnclock_read_raw(&nkclock) - lock->lock_date;
+	cpu = raw_smp_processor_id();
+	stats = &per_cpu(xnlock_stats, cpu);
+
+	if (lock->file == NULL) {
+		lock->file = "??";
+		lock->line = 0;
+		lock->function = "invalid";
+	}
+
+	if (unlikely(lock->owner != cpu)) {
+		pipeline_prepare_panic();
+		printk(XENO_ERR "lock %p already unlocked on CPU #%d\n"
+				"          last owner = %s:%u (%s(), CPU #%d)\n",
+		       lock, cpu, lock->file, lock->line, lock->function,
+		       lock->cpu);
+		dump_stack();
+		return 1;
+	}
+
+	/* File that we released it. */
+	lock->cpu = -lock->cpu;
+	lock->file = file;
+	lock->line = line;
+	lock->function = function;
+
+	if (lock_time > stats->lock_time) {
+		stats->lock_time = lock_time;
+		stats->spin_time = lock->spin_time;
+		stats->file = lock->file;
+		stats->function = lock->function;
+		stats->line = lock->line;
+	}
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(xnlock_dbg_release);
+
+#endif /* CONFIG_XENO_OPT_DEBUG_LOCKING */
+
+void xndebug_shadow_init(struct xnthread *thread)
+{
+	struct cobalt_ppd *sys_ppd;
+	size_t len;
+
+	sys_ppd = cobalt_ppd_get(0);
+	/*
+	 * The caller is current, so we know for sure that sys_ppd
+	 * will still be valid after we dropped the lock.
+	 *
+	 * NOTE: Kernel shadows all share the system global ppd
+	 * descriptor with no refcounting.
+	 */
+	thread->exe_path = sys_ppd->exe_path ?: "(unknown)";
+	/*
+	 * The program hash value is a unique token debug features may
+	 * use to identify all threads which belong to a given
+	 * executable file. Using this value for quick probes is often
+	 * handier and more efficient than testing the whole exe_path.
+	 */
+	len = strlen(thread->exe_path);
+	thread->proghash = jhash(thread->exe_path, len, 0);
+}
+
+int xndebug_init(void)
+{
+	int ret;
+
+	ret = init_trace_relax();
+	if (ret)
+		return ret;
+
+	return 0;
+}
+
+void xndebug_cleanup(void)
+{
+	cleanup_trace_relax();
+}
+
+/** @} */
+++ linux-patched/kernel/xenomai/timer.c	2022-03-21 12:58:28.717895514 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/kernel/Makefile	2022-03-21 12:57:23.892527656 +0100
+/*
+ * Copyright (C) 2001,2002,2003,2007,2012 Philippe Gerum <rpm@xenomai.org>.
+ * Copyright (C) 2004 Gilles Chanteperdrix <gilles.chanteperdrix@xenomai.org>
+ *
+ * Xenomai is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * Xenomai is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Xenomai; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA
+ * 02111-1307, USA.
+ */
+#include <linux/sched.h>
+#include <pipeline/tick.h>
+#include <cobalt/kernel/sched.h>
+#include <cobalt/kernel/thread.h>
+#include <cobalt/kernel/timer.h>
+#include <cobalt/kernel/intr.h>
+#include <cobalt/kernel/clock.h>
+#include <cobalt/kernel/trace.h>
+#include <cobalt/kernel/arith.h>
+#include <trace/events/cobalt-core.h>
+
+/**
+ * @ingroup cobalt_core
+ * @defgroup cobalt_core_timer Timer services
+ *
+ * The Xenomai timer facility depends on a clock source (xnclock) for
+ * scheduling the next activation times.
+ *
+ * The core provides and depends on a monotonic clock source (nkclock)
+ * with nanosecond resolution, driving the platform timer hardware
+ * exposed by the interrupt pipeline.
+ *
+ * @{
+ */
+
+int xntimer_heading_p(struct xntimer *timer)
+{
+	struct xnsched *sched = timer->sched;
+	xntimerq_t *q;
+	xntimerh_t *h;
+
+	q = xntimer_percpu_queue(timer);
+	h = xntimerq_head(q);
+	if (h == &timer->aplink)
+		return 1;
+
+	if (sched->lflags & XNHDEFER) {
+		h = xntimerq_second(q, h);
+		if (h == &timer->aplink)
+			return 1;
+	}
+
+	return 0;
+}
+
+void xntimer_enqueue_and_program(struct xntimer *timer, xntimerq_t *q)
+{
+	struct xnsched *sched = xntimer_sched(timer);
+
+	xntimer_enqueue(timer, q);
+	if (pipeline_must_force_program_tick(sched) || xntimer_heading_p(timer)) {
+		struct xnclock *clock = xntimer_clock(timer);
+
+		if (sched != xnsched_current())
+			xnclock_remote_shot(clock, sched);
+		else
+			xnclock_program_shot(clock, sched);
+	}
+}
+
+/**
+ * Arm a timer.
+ *
+ * Activates a timer so that the associated timeout handler will be
+ * fired after each expiration time. A timer can be either periodic or
+ * one-shot, depending on the reload value passed to this routine. The
+ * given timer must have been previously initialized.
+ *
+ * A timer is attached to the clock specified in xntimer_init().
+ *
+ * @param timer The address of a valid timer descriptor.
+ *
+ * @param value The date of the initial timer shot, expressed in
+ * nanoseconds.
+ *
+ * @param interval The reload value of the timer. It is a periodic
+ * interval value to be used for reprogramming the next timer shot,
+ * expressed in nanoseconds. If @a interval is equal to XN_INFINITE,
+ * the timer will not be reloaded after it has expired.
+ *
+ * @param mode The timer mode. It can be XN_RELATIVE if @a value shall
+ * be interpreted as a relative date, XN_ABSOLUTE for an absolute date
+ * based on the monotonic clock of the related time base (as returned
+ * by xnclock_read_monotonic()), or XN_REALTIME if the absolute date
+ * is based on the adjustable real-time date for the relevant clock
+ * (obtained from xnclock_read_realtime()).
+ *
+ * @return 0 is returned upon success, or -ETIMEDOUT if an absolute
+ * date in the past has been given. In such an event, the timer is
+ * nevertheless armed for the next shot in the timeline if @a interval
+ * is different from XN_INFINITE.
+ *
+ * @coretags{unrestricted, atomic-entry}
+ */
+int xntimer_start(struct xntimer *timer,
+		  xnticks_t value, xnticks_t interval,
+		  xntmode_t mode)
+{
+	struct xnclock *clock = xntimer_clock(timer);
+	xntimerq_t *q = xntimer_percpu_queue(timer);
+	xnticks_t date, now, delay, period;
+	unsigned long gravity;
+	int ret = 0;
+
+	atomic_only();
+
+	trace_cobalt_timer_start(timer, value, interval, mode);
+
+	if ((timer->status & XNTIMER_DEQUEUED) == 0)
+		xntimer_dequeue(timer, q);
+
+	now = xnclock_read_raw(clock);
+
+	timer->status &= ~(XNTIMER_REALTIME | XNTIMER_FIRED | XNTIMER_PERIODIC);
+	switch (mode) {
+	case XN_RELATIVE:
+		if ((xnsticks_t)value < 0)
+			return -ETIMEDOUT;
+		date = xnclock_ns_to_ticks(clock, value) + now;
+		break;
+	case XN_REALTIME:
+		timer->status |= XNTIMER_REALTIME;
+		value -= xnclock_get_offset(clock);
+		fallthrough;
+	default: /* XN_ABSOLUTE || XN_REALTIME */
+		date = xnclock_ns_to_ticks(clock, value);
+		if ((xnsticks_t)(date - now) <= 0) {
+			if (interval == XN_INFINITE)
+				return -ETIMEDOUT;
+			/*
+			 * We are late on arrival for the first
+			 * delivery, wait for the next shot on the
+			 * periodic time line.
+			 */
+			delay = now - date;
+			period = xnclock_ns_to_ticks(clock, interval);
+			date += period * (xnarch_div64(delay, period) + 1);
+		}
+		break;
+	}
+
+	/*
+	 * To cope with the basic system latency, we apply a clock
+	 * gravity value, which is the amount of time expressed in
+	 * clock ticks by which we should anticipate the shot for any
+	 * outstanding timer. The gravity value varies with the type
+	 * of context the timer wakes up, i.e. irq handler, kernel or
+	 * user thread.
+	 */
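+	/*
+	 * If anticipating by the full gravity would move the shot
+	 * into the past, only half of the gravity is applied below
+	 * instead.
+	 */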
+	gravity = xntimer_gravity(timer);
+	xntimerh_date(&timer->aplink) = date - gravity;
+	if (now >= xntimerh_date(&timer->aplink))
+		xntimerh_date(&timer->aplink) += gravity / 2;
+
+	timer->interval_ns = XN_INFINITE;
+	timer->interval = XN_INFINITE;
+	if (interval != XN_INFINITE) {
+		timer->interval_ns = interval;
+		timer->interval = xnclock_ns_to_ticks(clock, interval);
+		timer->periodic_ticks = 0;
+		timer->start_date = date;
+		timer->pexpect_ticks = 0;
+		timer->status |= XNTIMER_PERIODIC;
+	}
+
+	timer->status |= XNTIMER_RUNNING;
+	xntimer_enqueue_and_program(timer, q);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(xntimer_start);
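+
+/*
+ * Illustrative call (hypothetical timer name), arming a periodic
+ * timer firing 1 ms from now, then every 1 ms, typically with nklock
+ * held and interrupts off (atomic-entry):
+ *
+ *	ret = xntimer_start(&mytimer, 1000000, 1000000, XN_RELATIVE);
+ */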
+
+/**
+ * @fn int xntimer_stop(struct xntimer *timer)
+ *
+ * @brief Disarm a timer.
+ *
+ * This service deactivates a timer previously armed using
+ * xntimer_start(). Once disarmed, the timer can be subsequently
+ * re-armed using the latter service.
+ *
+ * @param timer The address of a valid timer descriptor.
+ *
+ * @coretags{unrestricted, atomic-entry}
+ */
+void __xntimer_stop(struct xntimer *timer)
+{
+	struct xnclock *clock = xntimer_clock(timer);
+	xntimerq_t *q = xntimer_percpu_queue(timer);
+	struct xnsched *sched;
+	int heading = 1;
+
+	atomic_only();
+
+	trace_cobalt_timer_stop(timer);
+
+	if ((timer->status & XNTIMER_DEQUEUED) == 0) {
+		heading = xntimer_heading_p(timer);
+		xntimer_dequeue(timer, q);
+	}
+	timer->status &= ~(XNTIMER_FIRED|XNTIMER_RUNNING);
+	sched = xntimer_sched(timer);
+
+	/*
+	 * If we removed the heading timer, reprogram the next shot if
+	 * any. If the timer was running on another CPU, let it tick.
+	 */
+	if (heading && sched == xnsched_current())
+		xnclock_program_shot(clock, sched);
+}
+EXPORT_SYMBOL_GPL(__xntimer_stop);
+
+/**
+ * @fn xnticks_t xntimer_get_date(struct xntimer *timer)
+ *
+ * @brief Return the absolute expiration date.
+ *
+ * Return the next expiration date of a timer as an absolute count of
+ * nanoseconds.
+ *
+ * @param timer The address of a valid timer descriptor.
+ *
+ * @return The expiration date in nanoseconds. The special value
+ * XN_INFINITE is returned if @a timer is currently disabled.
+ *
+ * @coretags{unrestricted, atomic-entry}
+ */
+xnticks_t xntimer_get_date(struct xntimer *timer)
+{
+	atomic_only();
+
+	if (!xntimer_running_p(timer))
+		return XN_INFINITE;
+
+	return xnclock_ticks_to_ns(xntimer_clock(timer), xntimer_expiry(timer));
+}
+EXPORT_SYMBOL_GPL(xntimer_get_date);
+
+/**
+ * @fn xnticks_t xntimer_get_timeout(struct xntimer *timer)
+ *
+ * @brief Return the relative expiration date.
+ *
+ * This call returns the count of nanoseconds remaining until the
+ * timer expires.
+ *
+ * @param timer The address of a valid timer descriptor.
+ *
+ * @return The count of nanoseconds until expiry. The special value
+ * XN_INFINITE is returned if @a timer is currently disabled.  It
+ * might happen that the timer expires when this service runs (even if
+ * the associated handler has not been fired yet); in such a case, 1
+ * is returned.
+ *
+ * @coretags{unrestricted, atomic-entry}
+ */
+xnticks_t __xntimer_get_timeout(struct xntimer *timer)
+{
+	struct xnclock *clock;
+	xnticks_t expiry, now;
+
+	atomic_only();
+
+	clock = xntimer_clock(timer);
+	now = xnclock_read_raw(clock);
+	expiry = xntimer_expiry(timer);
+	if (expiry < now)
+		return 1;  /* Will elapse shortly. */
+
+	return xnclock_ticks_to_ns(clock, expiry - now);
+}
+EXPORT_SYMBOL_GPL(__xntimer_get_timeout);
+
+/**
+ * @fn void xntimer_init(struct xntimer *timer,struct xnclock *clock,void (*handler)(struct xntimer *timer), struct xnsched *sched, int flags)
+ * @brief Initialize a timer object.
+ *
+ * Creates a timer. When created, a timer is left disarmed; it must be
+ * started using xntimer_start() in order to be activated.
+ *
+ * @param timer The address of a timer descriptor the nucleus will use
+ * to store the object-specific data.  This descriptor must always be
+ * valid while the object is active; therefore, it must be allocated in
+ * permanent memory.
+ *
+ * @param clock The clock the timer relates to. Xenomai defines a
+ * monotonic system clock, with nanosecond resolution, named
+ * nkclock. In addition, external clocks driven by other tick sources
+ * may be created dynamically if CONFIG_XENO_OPT_EXTCLOCK is defined.
+ *
+ * @param handler The routine to call upon expiration of the timer.
+ *
+ * @param sched An optional pointer to the per-CPU scheduler slot the
+ * new timer is affine to. If non-NULL, the timer will fire on the CPU
+ * @a sched is bound to, otherwise it will fire either on the current
+ * CPU if real-time, or on the first real-time CPU.
+ *
+ * @param flags A set of flags describing the timer. A set of clock
+ * gravity hints can be passed via the @a flags argument, used for
+ * optimizing the built-in heuristics aimed at latency reduction:
+ *
+ * - XNTIMER_IGRAVITY, the timer activates a leaf timer handler.
+ * - XNTIMER_KGRAVITY, the timer activates a kernel thread.
+ * - XNTIMER_UGRAVITY, the timer activates a user-space thread.
+ *
+ * There is no limitation on the number of timers which can be
+ * created/active concurrently.
+ *
+ * @coretags{unrestricted}
+ */
+#ifdef DOXYGEN_CPP
+void xntimer_init(struct xntimer *timer, struct xnclock *clock,
+		  void (*handler)(struct xntimer *timer),
+		  struct xnsched *sched,
+		  int flags);
+#endif
+
+void __xntimer_init(struct xntimer *timer,
+		    struct xnclock *clock,
+		    void (*handler)(struct xntimer *timer),
+		    struct xnsched *sched,
+		    int flags)
+{
+	spl_t s __maybe_unused;
+
+#ifdef CONFIG_XENO_OPT_EXTCLOCK
+	timer->clock = clock;
+#endif
+	xntimerh_init(&timer->aplink);
+	xntimerh_date(&timer->aplink) = XN_INFINITE;
+	xntimer_set_priority(timer, XNTIMER_STDPRIO);
+	timer->status = (XNTIMER_DEQUEUED|(flags & XNTIMER_INIT_MASK));
+	timer->handler = handler;
+	timer->interval_ns = 0;
+	timer->sched = NULL;
+
+	/*
+	 * Set the timer affinity, preferably to xnsched_cpu(sched) if
+	 * sched was given, CPU0 otherwise.
+	 */
+	if (sched == NULL)
+		sched = xnsched_struct(0);
+
+	xntimer_set_affinity(timer, sched);
+
+#ifdef CONFIG_XENO_OPT_STATS
+#ifdef CONFIG_XENO_OPT_EXTCLOCK
+	timer->tracker = clock;
+#endif
+	ksformat(timer->name, XNOBJECT_NAME_LEN, "%d/%s",
+		 task_pid_nr(current), current->comm);
+	xntimer_reset_stats(timer);
+	xnlock_get_irqsave(&nklock, s);
+	list_add_tail(&timer->next_stat, &clock->timerq);
+	clock->nrtimers++;
+	xnvfile_touch(&clock->timer_vfile);
+	xnlock_put_irqrestore(&nklock, s);
+#endif /* CONFIG_XENO_OPT_STATS */
+}
+EXPORT_SYMBOL_GPL(__xntimer_init);
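+
+/*
+ * Illustrative initialization (hypothetical names), binding a timer
+ * to the core monotonic clock with user-thread gravity, letting the
+ * core pick the CPU:
+ *
+ *	xntimer_init(&mytimer, &nkclock, mytimer_handler, NULL,
+ *		     XNTIMER_UGRAVITY);
+ */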
+
+void xntimer_set_gravity(struct xntimer *timer, int gravity)
+{
+	spl_t s;
+
+	xnlock_get_irqsave(&nklock, s);
+	timer->status &= ~XNTIMER_GRAVITY_MASK;
+	timer->status |= gravity;
+	xnlock_put_irqrestore(&nklock, s);
+}
+EXPORT_SYMBOL_GPL(xntimer_set_gravity);
+
+#ifdef CONFIG_XENO_OPT_EXTCLOCK
+
+#ifdef CONFIG_XENO_OPT_STATS
+
+static void __xntimer_switch_tracking(struct xntimer *timer,
+				      struct xnclock *newclock)
+{
+	struct xnclock *oldclock = timer->tracker;
+
+	list_del(&timer->next_stat);
+	oldclock->nrtimers--;
+	xnvfile_touch(&oldclock->timer_vfile);
+	list_add_tail(&timer->next_stat, &newclock->timerq);
+	newclock->nrtimers++;
+	xnvfile_touch(&newclock->timer_vfile);
+	timer->tracker = newclock;
+}
+
+void xntimer_switch_tracking(struct xntimer *timer,
+			     struct xnclock *newclock)
+{
+	spl_t s;
+
+	xnlock_get_irqsave(&nklock, s);
+	__xntimer_switch_tracking(timer, newclock);
+	xnlock_put_irqrestore(&nklock, s);
+}
+EXPORT_SYMBOL_GPL(xntimer_switch_tracking);
+
+#else
+
+static inline
+void __xntimer_switch_tracking(struct xntimer *timer,
+			       struct xnclock *newclock)
+{ }
+
+#endif /* CONFIG_XENO_OPT_STATS */
+
+/**
+ * @brief Set the reference clock of a timer.
+ *
+ * This service changes the reference clock pacing a timer. If the
+ * clock timers are tracked, the tracking information is updated too.
+ *
+ * @param timer The address of a valid timer descriptor.
+ *
+ * @param newclock The address of a valid clock descriptor.
+ *
+ * @coretags{unrestricted, atomic-entry}
+ */
+void xntimer_set_clock(struct xntimer *timer,
+		       struct xnclock *newclock)
+{
+	atomic_only();
+
+	if (timer->clock != newclock) {
+		xntimer_stop(timer);
+		timer->clock = newclock;
+		/*
+		 * Since the timer was stopped, we can wait until it
+		 * is restarted for fixing its CPU affinity.
+		 */
+		__xntimer_switch_tracking(timer, newclock);
+	}
+}
+
+#endif /* CONFIG_XENO_OPT_EXTCLOCK */
+
+/**
+ * @fn void xntimer_destroy(struct xntimer *timer)
+ *
+ * @brief Release a timer object.
+ *
+ * Destroys a timer. After it has been destroyed, all resources
+ * associated with the timer have been released. The timer is
+ * automatically deactivated before deletion if active on entry.
+ *
+ * @param timer The address of a valid timer descriptor.
+ *
+ * @coretags{unrestricted}
+ */
+void xntimer_destroy(struct xntimer *timer)
+{
+	struct xnclock *clock __maybe_unused = xntimer_clock(timer);
+	spl_t s;
+
+	xnlock_get_irqsave(&nklock, s);
+	xntimer_stop(timer);
+	timer->status |= XNTIMER_KILLED;
+	timer->sched = NULL;
+#ifdef CONFIG_XENO_OPT_STATS
+	list_del(&timer->next_stat);
+	clock->nrtimers--;
+	xnvfile_touch(&clock->timer_vfile);
+#endif /* CONFIG_XENO_OPT_STATS */
+	xnlock_put_irqrestore(&nklock, s);
+}
+EXPORT_SYMBOL_GPL(xntimer_destroy);
+
+#ifdef CONFIG_SMP
+
+/**
+ * Migrate a timer.
+ *
+ * This call migrates a timer to another cpu. In order to avoid
+ * pathological cases, it must be called from the CPU to which @a
+ * timer is currently attached.
+ *
+ * @param timer The address of the timer object to be migrated.
+ *
+ * @param sched The address of the destination per-CPU scheduler
+ * slot.
+ *
+ * @coretags{unrestricted, atomic-entry}
+ */
+void __xntimer_migrate(struct xntimer *timer, struct xnsched *sched)
+{				/* nklocked, IRQs off, sched != timer->sched */
+	struct xnclock *clock;
+	xntimerq_t *q;
+
+	trace_cobalt_timer_migrate(timer, xnsched_cpu(sched));
+
+	/*
+	 * This assertion triggers when the timer is migrated to a CPU
+	 * for which we do not expect any clock events/IRQs from the
+	 * associated clock device. If so, the timer would never fire
+	 * since clock ticks would never happen on that CPU.
+	 */
+	XENO_WARN_ON_SMP(COBALT,
+			 !cpumask_empty(&xntimer_clock(timer)->affinity) &&
+			 !cpumask_test_cpu(xnsched_cpu(sched),
+					   &xntimer_clock(timer)->affinity));
+
+	if (timer->status & XNTIMER_RUNNING) {
+		xntimer_stop(timer);
+		timer->sched = sched;
+		clock = xntimer_clock(timer);
+		q = xntimer_percpu_queue(timer);
+		xntimer_enqueue(timer, q);
+		if (xntimer_heading_p(timer))
+			xnclock_remote_shot(clock, sched);
+	} else
+		timer->sched = sched;
+}
+EXPORT_SYMBOL_GPL(__xntimer_migrate);
+
+static inline int get_clock_cpu(struct xnclock *clock, int cpu)
+{
+	/*
+	 * Check a CPU number against the possible set of CPUs
+	 * receiving events from the underlying clock device. If the
+	 * suggested CPU does not receive events from this device,
+	 * return the first one which does instead.
+	 *
+	 * A global clock device with no particular IRQ affinity may
+	 * tick on any CPU, but timers should always be queued on
+	 * CPU0.
+	 *
+	 * NOTE: we have scheduler slots initialized for all online
+	 * CPUs, we can program and receive clock ticks on any of
+	 * them. So there is no point in restricting the valid CPU set
+	 * to cobalt_cpu_affinity, which specifically refers to the
+	 * set of CPUs which may run real-time threads. Although
+	 * receiving a clock tick for waking up a thread living on a
+	 * remote CPU is not optimal since this involves IPI-signaled
+	 * rescheds, this is still a valid case.
+	 */
+	if (cpumask_empty(&clock->affinity))
+		return 0;
+
+	if (cpumask_test_cpu(cpu, &clock->affinity))
+		return cpu;
+
+	return cpumask_first(&clock->affinity);
+}
+
+void __xntimer_set_affinity(struct xntimer *timer, struct xnsched *sched)
+{				/* nklocked, IRQs off */
+	struct xnclock *clock = xntimer_clock(timer);
+	int cpu;
+
+	/*
+	 * Figure out which CPU is best suited for managing this
+	 * timer, preferably picking xnsched_cpu(sched) if the ticking
+	 * device moving the timer clock beats on that CPU. Otherwise,
+	 * pick the first CPU from the clock affinity mask if set. If
+	 * not, the timer is backed by a global device with no
+	 * particular IRQ affinity, so it should always be queued to
+	 * CPU0.
+	 */
+	cpu = 0;
+	if (!cpumask_empty(&clock->affinity))
+		cpu = get_clock_cpu(clock, xnsched_cpu(sched));
+
+	xntimer_migrate(timer, xnsched_struct(cpu));
+}
+EXPORT_SYMBOL_GPL(__xntimer_set_affinity);
+
+#endif /* CONFIG_SMP */
+
+/**
+ * Get the count of overruns for the last tick.
+ *
+ * This service returns the count of pending overruns for the last
+ * tick of a given timer, as measured by the difference between the
+ * expected expiry date of the timer and the date @a now passed as
+ * argument.
+ *
+ * @param timer The address of a valid timer descriptor.
+ *
+ * @param waiter The thread for which the overrun count is being
+ * collected.
+ *
+ * @param now The current date, expressed in raw clock ticks (i.e. as
+ * returned by xnclock_read_raw(xntimer_clock(timer))).
+ *
+ * @return The number of overruns of @a timer at date @a now.
+ *
+ * @coretags{unrestricted, atomic-entry}
+ */
+unsigned long long xntimer_get_overruns(struct xntimer *timer,
+					struct xnthread *waiter,
+					xnticks_t now)
+{
+	xnticks_t period = timer->interval;
+	unsigned long long overruns = 0;
+	xnsticks_t delta;
+	xntimerq_t *q;
+
+	atomic_only();
+
+	delta = now - xntimer_pexpect(timer);
+	if (unlikely(delta >= (xnsticks_t) period)) {
+		period = timer->interval_ns;
+		delta = xnclock_ticks_to_ns(xntimer_clock(timer), delta);
+		overruns = xnarch_div64(delta, period);
+		timer->pexpect_ticks += overruns;
+		if (xntimer_running_p(timer)) {
+			XENO_BUG_ON(COBALT, (timer->status &
+				    (XNTIMER_DEQUEUED|XNTIMER_PERIODIC))
+				    != XNTIMER_PERIODIC);
+			q = xntimer_percpu_queue(timer);
+			xntimer_dequeue(timer, q);
+			while (xntimerh_date(&timer->aplink) < now) {
+				timer->periodic_ticks++;
+				xntimer_update_date(timer);
+			}
+			xntimer_enqueue_and_program(timer, q);
+		}
+	}
+
+	timer->pexpect_ticks++;
+
+	/* Hide overruns due to the most recent ptracing session. */
+	if (xnthread_test_localinfo(waiter, XNHICCUP))
+		return 0;
+
+	return overruns;
+}
+EXPORT_SYMBOL_GPL(xntimer_get_overruns);
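+
+/*
+ * Worked example (editorial note): with a periodic timer of
+ * interval_ns = 1000000 (1 ms), a waiter resuming with
+ * now - pexpect equivalent to 3500000 ns gets
+ * overruns = 3500000 / 1000000 = 3; pexpect_ticks advances by these
+ * 3 overruns plus 1 for the tick being consumed, and the timer is
+ * requeued on the first release point not earlier than @a now.
+ */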
+
+char *xntimer_format_time(xnticks_t ns, char *buf, size_t bufsz)
+{
+	unsigned long ms, us, rem;
+	int len = (int)bufsz;
+	char *p = buf;
+	xnticks_t sec;
+
+	if (ns == 0 && bufsz > 1) {
+		strcpy(buf, "-");
+		return buf;
+	}
+
+	sec = xnclock_divrem_billion(ns, &rem);
+	us = rem / 1000;
+	ms = us / 1000;
+	us %= 1000;
+
+	if (sec) {
+		p += ksformat(p, bufsz, "%Lus", sec);
+		len = bufsz - (p - buf);
+	}
+
+	if (len > 0 && (ms || (sec && us))) {
+		p += ksformat(p, bufsz - (p - buf), "%lums", ms);
+		len = bufsz - (p - buf);
+	}
+
+	if (len > 0 && us)
+		p += ksformat(p, bufsz - (p - buf), "%luus", us);
+
+	return buf;
+}
+EXPORT_SYMBOL_GPL(xntimer_format_time);
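+
+/*
+ * Output examples (editorial note), assuming a large enough buffer:
+ *
+ *	ns = 1234567000  =>  "1s234ms567us"
+ *	ns = 42000       =>  "42us"
+ *	ns = 0           =>  "-"
+ */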
+
+#if defined(CONFIG_XENO_OPT_TIMER_RBTREE)
+static inline bool xntimerh_is_lt(xntimerh_t *left, xntimerh_t *right)
+{
+	return left->date < right->date
+		|| (left->date == right->date && left->prio > right->prio);
+}
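+
+/*
+ * Ordering example (editorial note): the earliest date sorts first;
+ * on equal dates, the higher priority value wins. E.g.
+ * (date=100, prio=2) < (date=100, prio=1) < (date=200, prio=3).
+ */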
+
+void xntimerq_insert(xntimerq_t *q, xntimerh_t *holder)
+{
+	struct rb_node **new = &q->root.rb_node, *parent = NULL;
+
+	if (!q->head)
+		q->head = holder;
+	else if (xntimerh_is_lt(holder, q->head)) {
+		parent = &q->head->link;
+		new = &parent->rb_left;
+		q->head = holder;
+	} else while (*new) {
+		xntimerh_t *i = container_of(*new, xntimerh_t, link);
+
+		parent = *new;
+		if (xntimerh_is_lt(holder, i))
+			new = &((*new)->rb_left);
+		else
+			new = &((*new)->rb_right);
+	}
+
+	rb_link_node(&holder->link, parent, new);
+	rb_insert_color(&holder->link, &q->root);
+}
+#endif
+
+/** @} */
+++ linux-patched/kernel/Makefile	2022-03-21 12:58:28.219900370 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:163 @
 	$(call cmd,genikh)
 
 clean-files := kheaders_data.tar.xz kheaders.md5
--- linux/drivers/xenomai/spi/spi-device.h	1970-01-01 01:00:00.000000000 +0100
+
+obj-$(CONFIG_XENOMAI) += xenomai/
+++ linux-patched/drivers/xenomai/spi/spi-device.h	2022-03-21 12:58:31.487868503 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/drivers/xenomai/spi/Kconfig	1970-01-01 01:00:00.000000000 +0100
+/**
+ * @note Copyright (C) 2016 Philippe Gerum <rpm@xenomai.org>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+#ifndef _RTDM_SPI_DEVICE_H
+#define _RTDM_SPI_DEVICE_H
+
+#include <linux/list.h>
+#include <linux/atomic.h>
+#include <linux/mutex.h>
+#include <rtdm/driver.h>
+#include <rtdm/uapi/spi.h>
+
+struct class;
+struct rtdm_spi_master;
+
+struct rtdm_spi_remote_slave {
+	u8 chip_select;
+	int cs_gpio;
+	struct gpio_desc *cs_gpiod;
+	struct rtdm_device dev;
+	struct list_head next;
+	struct rtdm_spi_config config;
+	struct rtdm_spi_master *master;
+	atomic_t mmap_refs;
+	struct mutex ctl_lock;
+};
+
+static inline struct device *
+slave_to_kdev(struct rtdm_spi_remote_slave *slave)
+{
+	return rtdm_dev_to_kdev(&slave->dev);
+}
+
+int rtdm_spi_add_remote_slave(struct rtdm_spi_remote_slave *slave,
+			      struct rtdm_spi_master *spim,
+			      struct spi_device *spi);
+
+void rtdm_spi_remove_remote_slave(struct rtdm_spi_remote_slave *slave);
+
+#endif /* !_RTDM_SPI_DEVICE_H */
+++ linux-patched/drivers/xenomai/spi/Kconfig	2022-03-21 12:58:31.480868571 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/drivers/xenomai/spi/spi-device.c	1970-01-01 01:00:00.000000000 +0100
+menu "Real-time SPI master drivers"
+
+config XENO_DRIVERS_SPI
+       depends on SPI
+       tristate
+
+config XENO_DRIVERS_SPI_BCM2835
+	depends on ARCH_BCM2708 || ARCH_BCM2835
+	select XENO_DRIVERS_SPI
+	tristate "Support for BCM2835 SPI"
+	help
+
+	Enables support for the SPI0 controller found on Broadcom's
+	BCM2835 SoC.
+
+config XENO_DRIVERS_SPI_SUN6I
+	depends on MACH_SUN6I || MACH_SUN8I
+	select XENO_DRIVERS_SPI
+	tristate "Support for A31/H3 SoC SPI"
+	help
+
+	Enables support for the SPI controller found on Allwinner's
+	A31 and H3 SoCs.
+
+config XENO_DRIVERS_SPI_OMAP2_MCSPI_RT
+	tristate "McSPI rt-driver for OMAP"
+	depends on HAS_DMA
+	depends on ARCH_OMAP2PLUS || COMPILE_TEST
+	select XENO_DRIVERS_SPI
+	help
+
+	Real-time SPI master driver for the Multichannel SPI (McSPI)
+	modules found on OMAP24XX and later SoCs.
+
+config XENO_DRIVERS_SPI_DEBUG
+       depends on XENO_DRIVERS_SPI
+       bool "Enable SPI core debugging features"
+       
+endmenu
+++ linux-patched/drivers/xenomai/spi/spi-device.c	2022-03-21 12:58:31.473868639 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/drivers/xenomai/spi/spi-omap2-mcspi-rt.c	1970-01-01 01:00:00.000000000 +0100
+/**
+ * @note Copyright (C) 2016 Philippe Gerum <rpm@xenomai.org>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/device.h>
+#include <linux/slab.h>
+#include <linux/mutex.h>
+#include <linux/err.h>
+#include <linux/gpio.h>
+#include <linux/spi/spi.h>
+#include "spi-master.h"
+
+int rtdm_spi_add_remote_slave(struct rtdm_spi_remote_slave *slave,
+			      struct rtdm_spi_master *master,
+			      struct spi_device *spi)
+{
+	struct spi_master *kmaster = master->kmaster;
+	struct rtdm_device *dev;
+	rtdm_lockctx_t c;
+	int ret;
+
+	memset(slave, 0, sizeof(*slave));
+	slave->chip_select = spi->chip_select;
+	slave->config.bits_per_word = spi->bits_per_word;
+	slave->config.speed_hz = spi->max_speed_hz;
+	slave->config.mode = spi->mode;
+	slave->master = master;
+	
+	dev = &slave->dev;
+	dev->driver = &master->driver;
+	dev->label = kasprintf(GFP_KERNEL, "%s/slave%d.%%d",
+			       dev_name(&kmaster->dev),
+			       kmaster->bus_num);
+	if (dev->label == NULL)
+		return -ENOMEM;
+
+	if (gpio_is_valid(spi->cs_gpio))
+		slave->cs_gpio = spi->cs_gpio;
+	else {
+		slave->cs_gpio = -ENOENT;
+		if (kmaster->cs_gpios)
+			slave->cs_gpio = kmaster->cs_gpios[spi->chip_select];
+	}
+
+	if (gpio_is_valid(slave->cs_gpio)) {
+		ret = gpio_request(slave->cs_gpio, dev->label);
+		if (ret)
+			goto fail;
+		slave->cs_gpiod = gpio_to_desc(slave->cs_gpio);
+		if (slave->cs_gpiod == NULL) {
+			ret = -ENODEV;	/* do not report success on failure */
+			goto fail;
+		}
+	}
+	
+	mutex_init(&slave->ctl_lock);
+
+	dev->device_data = master;
+	ret = rtdm_dev_register(dev);
+	if (ret)
+		goto fail;
+
+	rtdm_lock_get_irqsave(&master->lock, c);
+	list_add_tail(&slave->next, &master->slaves);
+	rtdm_lock_put_irqrestore(&master->lock, c);
+
+	return 0;
+fail:
+	kfree(dev->label);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(rtdm_spi_add_remote_slave);
+
+void rtdm_spi_remove_remote_slave(struct rtdm_spi_remote_slave *slave)
+{
+	struct rtdm_spi_master *master = slave->master;
+	struct rtdm_device *dev;
+	rtdm_lockctx_t c;
+	
+	if (gpio_is_valid(slave->cs_gpio))
+		gpio_free(slave->cs_gpio);
+
+	mutex_destroy(&slave->ctl_lock);
+	rtdm_lock_get_irqsave(&master->lock, c);
+	list_del(&slave->next);
+	rtdm_lock_put_irqrestore(&master->lock, c);
+	dev = &slave->dev;
+	rtdm_dev_unregister(dev);
+	kfree(dev->label);
+}
+EXPORT_SYMBOL_GPL(rtdm_spi_remove_remote_slave);
+
+static int spi_device_probe(struct spi_device *spi)
+{
+	struct rtdm_spi_remote_slave *slave;
+	struct rtdm_spi_master *master;
+	int ret;
+
+	/*
+	 * Chicken and egg issue: we want the RTDM device class name
+	 * to duplicate the SPI master name, but that information is
+	 * only available after spi_register_master() has returned. We
+	 * solve this by initializing the RTDM driver descriptor on
+	 * the fly when the first SPI device on the bus is advertised
+	 * on behalf of spi_register_master().
+	 *
+	 * NOTE: the driver core guarantees serialization.
+	 */
+	master = spi_master_get_devdata(spi->master);
+	if (master->devclass == NULL) {
+		ret = __rtdm_spi_setup_driver(master);
+		if (ret)
+			return ret;
+	}
+
+	slave = master->ops->attach_slave(master, spi);
+	if (IS_ERR(slave))
+		return PTR_ERR(slave);
+
+	spi_set_drvdata(spi, slave);
+
+	return 0;
+}
+
+static int spi_device_remove(struct spi_device *spi)
+{
+	struct rtdm_spi_remote_slave *slave = spi_get_drvdata(spi);
+
+	slave->master->ops->detach_slave(slave);
+
+	return 0;
+}
+
+static const struct of_device_id spi_device_match[] = {
+	{
+		.compatible = "rtdm-spidev",
+	},
+	{ /* Sentinel */ },
+};
+MODULE_DEVICE_TABLE(of, spi_device_match);
+
+static struct spi_driver spi_device_driver = {
+	.driver = {
+		.name =	"rtdm_spi_device",
+		.owner = THIS_MODULE,
+		.of_match_table = spi_device_match,
+	},
+	.probe	= spi_device_probe,
+	.remove	= spi_device_remove,
+};
+
+static int __init spi_device_init(void)
+{
+	return spi_register_driver(&spi_device_driver);
+}
+module_init(spi_device_init);
+
+static void __exit spi_device_exit(void)
+{
+	spi_unregister_driver(&spi_device_driver);
+}
+module_exit(spi_device_exit);
+++ linux-patched/drivers/xenomai/spi/spi-omap2-mcspi-rt.c	2022-03-21 12:58:31.465868717 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/drivers/xenomai/spi/Makefile	1970-01-01 01:00:00.000000000 +0100
+/**
+ * I/O handling lifted from drivers/spi/spi-omap2-mcspi.c:
+ * Copyright (C) 2019 Laurentiu-Cristian Duca
+ *  <laurentiu [dot] duca [at] gmail [dot] com>
+ * RTDM integration by:
+ * Copyright (C) 2016 Philippe Gerum <rpm@xenomai.org>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/err.h>
+#include <linux/dma-mapping.h>
+#include <linux/interrupt.h>
+#include <linux/platform_device.h>
+#include <linux/io.h>
+#include <linux/clk.h>
+#include <linux/spi/spi.h>
+#include <linux/of_irq.h>
+#include <linux/of_gpio.h>
+#include <linux/of_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/gcd.h>
+#include "spi-master.h"
+
+#define RTDM_SUBCLASS_OMAP2_MCSPI  3
+
+#define OMAP4_MCSPI_REG_OFFSET 0x100
+#define OMAP2_MCSPI_SPI_MODE_BITS	(SPI_CPOL | SPI_CPHA | SPI_CS_HIGH)
+
+#define OMAP2_MCSPI_MAX_FREQ		48000000
+#define OMAP2_MCSPI_DRIVER_MAX_FREQ	40000000
+#define OMAP2_MCSPI_MAX_DIVIDER		4096
+#define OMAP2_MCSPI_MAX_FIFODEPTH	64
+#define OMAP2_MCSPI_MAX_FIFOWCNT	0xFFFF
+#define SPI_AUTOSUSPEND_TIMEOUT		2000
+#define PM_NEGATIVE_DELAY			-2000
+
+#define OMAP2_MCSPI_REVISION		0x00
+#define OMAP2_MCSPI_SYSCONFIG		0x10
+#define OMAP2_MCSPI_SYSSTATUS		0x14
+#define OMAP2_MCSPI_IRQSTATUS		0x18
+#define OMAP2_MCSPI_IRQENABLE		0x1c
+#define OMAP2_MCSPI_WAKEUPENABLE	0x20
+#define OMAP2_MCSPI_SYST		0x24
+#define OMAP2_MCSPI_MODULCTRL		0x28
+#define OMAP2_MCSPI_XFERLEVEL		0x7c
+
+/* per-channel (chip select) banks, 0x14 bytes each, first is: */
+#define OMAP2_MCSPI_CHANNELBANK_SIZE	0x14
+#define OMAP2_MCSPI_CHCONF0		0x2c
+#define OMAP2_MCSPI_CHSTAT0		0x30
+#define OMAP2_MCSPI_CHCTRL0		0x34
+#define OMAP2_MCSPI_TX0			0x38
+#define OMAP2_MCSPI_RX0			0x3c
+
+/* per-register bitmasks: */
+#define OMAP2_MCSPI_IRQSTATUS_EOW		BIT(17)
+#define OMAP2_MCSPI_IRQSTATUS_RX1_FULL  BIT(6)
+#define OMAP2_MCSPI_IRQSTATUS_TX1_EMPTY	BIT(4)
+#define OMAP2_MCSPI_IRQSTATUS_RX0_FULL  BIT(2)
+#define OMAP2_MCSPI_IRQSTATUS_TX0_EMPTY	BIT(0)
+
+#define OMAP2_MCSPI_IRQENABLE_EOW		BIT(17)
+#define OMAP2_MCSPI_IRQENABLE_RX1_FULL  BIT(6)
+#define OMAP2_MCSPI_IRQENABLE_TX1_EMPTY	BIT(4)
+#define OMAP2_MCSPI_IRQENABLE_RX0_FULL  BIT(2)
+#define OMAP2_MCSPI_IRQENABLE_TX0_EMPTY	BIT(0)
+
+#define OMAP2_MCSPI_MODULCTRL_SINGLE	BIT(0)
+#define OMAP2_MCSPI_MODULCTRL_MS	BIT(2)
+#define OMAP2_MCSPI_MODULCTRL_STEST	BIT(3)
+
+#define OMAP2_MCSPI_CHCONF_PHA		BIT(0)
+#define OMAP2_MCSPI_CHCONF_POL		BIT(1)
+#define OMAP2_MCSPI_CHCONF_CLKD_MASK	(0x0f << 2)
+#define OMAP2_MCSPI_CHCONF_EPOL		BIT(6)
+#define OMAP2_MCSPI_CHCONF_WL_MASK	(0x1f << 7)
+#define OMAP2_MCSPI_CHCONF_TRM_RX_ONLY	BIT(12)
+#define OMAP2_MCSPI_CHCONF_TRM_TX_ONLY	BIT(13)
+#define OMAP2_MCSPI_CHCONF_TRM_MASK	(0x03 << 12)
+#define OMAP2_MCSPI_CHCONF_DMAW		BIT(14)
+#define OMAP2_MCSPI_CHCONF_DMAR		BIT(15)
+#define OMAP2_MCSPI_CHCONF_DPE0		BIT(16)
+#define OMAP2_MCSPI_CHCONF_DPE1		BIT(17)
+#define OMAP2_MCSPI_CHCONF_IS		BIT(18)
+#define OMAP2_MCSPI_CHCONF_TURBO	BIT(19)
+#define OMAP2_MCSPI_CHCONF_FORCE	BIT(20)
+#define OMAP2_MCSPI_CHCONF_FFET		BIT(27)
+#define OMAP2_MCSPI_CHCONF_FFER		BIT(28)
+#define OMAP2_MCSPI_CHCONF_CLKG		BIT(29)
+
+#define OMAP2_MCSPI_CHSTAT_RXS		BIT(0)
+#define OMAP2_MCSPI_CHSTAT_TXS		BIT(1)
+#define OMAP2_MCSPI_CHSTAT_EOT		BIT(2)
+#define OMAP2_MCSPI_CHSTAT_TXFFE	BIT(3)
+
+#define OMAP2_MCSPI_CHCTRL_EN		BIT(0)
+#define OMAP2_MCSPI_CHCTRL_EXTCLK_MASK	(0xff << 8)
+
+#define OMAP2_MCSPI_WAKEUPENABLE_WKEN	BIT(0)
+
+#define OMAP2_MCSPI_SYSCONFIG_CLOCKACTIVITY_MASK	(0x3 << 8)
+#define OMAP2_MCSPI_SYSCONFIG_SIDLEMODE_MASK		(0x3 << 3)
+#define OMAP2_MCSPI_SYSCONFIG_SOFTRESET				BIT(1)
+#define OMAP2_MCSPI_SYSCONFIG_AUTOIDLE				BIT(0)
+
+#define OMAP2_MCSPI_SYSSTATUS_RESETDONE BIT(0)
+
+/* current version supports max 2 CS per module */
+#define OMAP2_MCSPI_CS_N	2
+
+#define MCSPI_PINDIR_D0_IN_D1_OUT	0
+#define MCSPI_PINDIR_D0_OUT_D1_IN	1
+
+struct omap2_mcspi_platform_config {
+	unsigned short	num_cs;
+	unsigned int regs_offset;
+	unsigned int pin_dir:1;
+};
+
+struct omap2_mcspi_cs {
+	/* CS channel */
+	void __iomem		*regs;
+	unsigned long		phys;
+	u8 chosen;
+};
+
+struct spi_master_omap2_mcspi {
+	struct rtdm_spi_master master;
+	void __iomem *regs;
+	unsigned long phys;
+	rtdm_irq_t irqh;
+	const u8 *tx_buf;
+	u8 *rx_buf;
+	int tx_len;
+	int rx_len;
+	int fifo_depth;
+	rtdm_event_t transfer_done;
+	rtdm_lock_t lock;
+	unsigned int pin_dir:1;
+	struct omap2_mcspi_cs cs[OMAP2_MCSPI_CS_N];
+	/* logging */
+	int n_rx_full;
+	int n_tx_empty;
+	int n_interrupts;
+};
+
+struct spi_slave_omap2_mcspi {
+	struct rtdm_spi_remote_slave slave;
+	void *io_virt;
+	dma_addr_t io_dma;
+	size_t io_len;
+};
+
+static inline struct spi_slave_omap2_mcspi *
+to_slave_omap2_mcspi(struct rtdm_spi_remote_slave *slave)
+{
+	return container_of(slave, struct spi_slave_omap2_mcspi, slave);
+}
+
+static inline struct spi_master_omap2_mcspi *
+to_master_omap2_mcspi(struct rtdm_spi_remote_slave *slave)
+{
+	return container_of(slave->master,
+			struct spi_master_omap2_mcspi, master);
+}
+
+static inline struct device *
+master_to_kdev(struct rtdm_spi_master *master)
+{
+	return &master->kmaster->dev;
+}
+
+static inline u32 mcspi_rd_reg(struct spi_master_omap2_mcspi *spim,
+			     unsigned int reg)
+{
+	return readl(spim->regs + reg);
+}
+
+static inline void mcspi_wr_reg(struct spi_master_omap2_mcspi *spim,
+			      unsigned int reg, u32 val)
+{
+	writel(val, spim->regs + reg);
+}
+
+static inline u32
+mcspi_rd_cs_reg(struct spi_master_omap2_mcspi *spim,
+				int cs_id, unsigned int reg)
+{
+	return readl(spim->cs[cs_id].regs + reg);
+}
+
+static inline void
+mcspi_wr_cs_reg(struct spi_master_omap2_mcspi *spim, int cs_id,
+				unsigned int reg, u32 val)
+{
+	writel(val, spim->cs[cs_id].regs + reg);
+}
+
+static void omap2_mcspi_init_hw(struct spi_master_omap2_mcspi *spim)
+{
+	u32 l;
+
+	l = mcspi_rd_reg(spim, OMAP2_MCSPI_SYSCONFIG);
+	/* CLOCKACTIVITY = 3h: OCP and Functional clocks are maintained */
+	l |= OMAP2_MCSPI_SYSCONFIG_CLOCKACTIVITY_MASK;
+	/* SIDLEMODE = 1h: ignore idle requests */
+	l &= ~OMAP2_MCSPI_SYSCONFIG_SIDLEMODE_MASK;
+	l |= 0x1 << 3;
+	/* AUTOIDLE=0: OCP clock is free-running */
+	l &= ~OMAP2_MCSPI_SYSCONFIG_AUTOIDLE;
+	mcspi_wr_reg(spim, OMAP2_MCSPI_SYSCONFIG, l);
+
+	/* Initialise the hardware with the default polarities (only omap2) */
+	mcspi_wr_reg(spim, OMAP2_MCSPI_WAKEUPENABLE,
+				 OMAP2_MCSPI_WAKEUPENABLE_WKEN);
+
+	/* Setup single-channel master mode */
+	l = mcspi_rd_reg(spim, OMAP2_MCSPI_MODULCTRL);
+	/* MS=0 => spi master */
+	l &= ~(OMAP2_MCSPI_MODULCTRL_STEST | OMAP2_MCSPI_MODULCTRL_MS);
+	l |= OMAP2_MCSPI_MODULCTRL_SINGLE;
+	mcspi_wr_reg(spim, OMAP2_MCSPI_MODULCTRL, l);
+}
+
+static void omap2_mcspi_reset_hw(struct spi_master_omap2_mcspi *spim)
+{
+	u32 l;
+
+	l = mcspi_rd_reg(spim, OMAP2_MCSPI_SYSCONFIG);
+	l |= OMAP2_MCSPI_SYSCONFIG_SOFTRESET;
+	mcspi_wr_reg(spim, OMAP2_MCSPI_SYSCONFIG, l);
+	/* wait until reset is done */
+	do {
+		l = mcspi_rd_reg(spim, OMAP2_MCSPI_SYSSTATUS);
+		cpu_relax();
+	} while (!(l & OMAP2_MCSPI_SYSSTATUS_RESETDONE));
+}
+
+static void
+omap2_mcspi_chip_select(struct rtdm_spi_remote_slave *slave, bool active)
+{
+	struct spi_master_omap2_mcspi *spim = to_master_omap2_mcspi(slave);
+	u32 l;
+
+	/* FORCE: manual SPIEN assertion to keep SPIEN active */
+	l = mcspi_rd_cs_reg(spim, slave->chip_select, OMAP2_MCSPI_CHCONF0);
+	/* "active" is the logical state, not the impedance level. */
+	if (active)
+		l |= OMAP2_MCSPI_CHCONF_FORCE;
+	else
+		l &= ~OMAP2_MCSPI_CHCONF_FORCE;
+	mcspi_wr_cs_reg(spim, slave->chip_select, OMAP2_MCSPI_CHCONF0, l);
+	/* Flush posted writes */
+	l = mcspi_rd_cs_reg(spim, slave->chip_select, OMAP2_MCSPI_CHCONF0);
+}
+
+static u32 omap2_mcspi_calc_divisor(u32 speed_hz)
+{
+	u32 div;
+
+	for (div = 0; div < 15; div++)
+		if (speed_hz >= (OMAP2_MCSPI_MAX_FREQ >> div))
+			return div;
+
+	return 15;
+}
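+
+/*
+ * Worked example (editorial note): for a requested speed of 1 MHz,
+ * the first divider satisfying speed_hz >= 48 MHz >> div is div = 6
+ * (48 MHz >> 6 = 750 kHz), i.e. the fastest power-of-two divided
+ * rate which does not exceed the request.
+ */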
+
+/* channel 0 enable/disable */
+static void
+omap2_mcspi_channel_enable(struct rtdm_spi_remote_slave *slave, int enable)
+{
+	struct spi_master_omap2_mcspi *spim = to_master_omap2_mcspi(slave);
+	u32 l;
+
+	l = mcspi_rd_cs_reg(spim, slave->chip_select, OMAP2_MCSPI_CHCTRL0);
+	if (enable)
+		l |= OMAP2_MCSPI_CHCTRL_EN;
+	else
+		l &= ~OMAP2_MCSPI_CHCTRL_EN;
+	mcspi_wr_cs_reg(spim, slave->chip_select, OMAP2_MCSPI_CHCTRL0, l);
+	/* Flush posted writes */
+	l = mcspi_rd_cs_reg(spim, slave->chip_select, OMAP2_MCSPI_CHCTRL0);
+}
+
+/* Called only when no transfer is active on this device. */
+static int omap2_mcspi_configure(struct rtdm_spi_remote_slave *slave)
+{
+	struct spi_master_omap2_mcspi *spim = to_master_omap2_mcspi(slave);
+	struct rtdm_spi_config *config = &slave->config;
+	u32 l = 0, clkd = 0, div = 1, extclk = 0, clkg = 0, word_len;
+	u32 speed_hz = OMAP2_MCSPI_MAX_FREQ;
+	u32 chctrl0;
+
+	/* The configuration parameters can be loaded in MCSPI_CH(i)CONF
+	 * only when the channel is disabled
+	 */
+	omap2_mcspi_channel_enable(slave, 0);
+
+	l = mcspi_rd_cs_reg(spim, slave->chip_select, OMAP2_MCSPI_CHCONF0);
+
+	/* Set clock frequency. */
+	speed_hz = (u32) config->speed_hz;
+	if (speed_hz > OMAP2_MCSPI_DRIVER_MAX_FREQ) {
+		dev_warn(slave_to_kdev(slave),
+			"maximum clock frequency is %d",
+			OMAP2_MCSPI_DRIVER_MAX_FREQ);
+	}
+	speed_hz = min_t(u32, speed_hz, OMAP2_MCSPI_DRIVER_MAX_FREQ);
+	if (speed_hz < (OMAP2_MCSPI_MAX_FREQ / OMAP2_MCSPI_MAX_DIVIDER)) {
+		clkd = omap2_mcspi_calc_divisor(speed_hz);
+		speed_hz = OMAP2_MCSPI_MAX_FREQ >> clkd;
+		clkg = 0;
+	} else {
+		div = (OMAP2_MCSPI_MAX_FREQ + speed_hz - 1) / speed_hz;
+		speed_hz = OMAP2_MCSPI_MAX_FREQ / div;
+		clkd = (div - 1) & 0xf;
+		extclk = (div - 1) >> 4;
+		clkg = OMAP2_MCSPI_CHCONF_CLKG;
+	}
+	/* set clock divisor */
+	l &= ~OMAP2_MCSPI_CHCONF_CLKD_MASK;
+	l |= clkd << 2;
+	/* set clock granularity */
+	l &= ~OMAP2_MCSPI_CHCONF_CLKG;
+	l |= clkg;
+	if (clkg) {
+		chctrl0 = mcspi_rd_cs_reg(spim,
+			slave->chip_select, OMAP2_MCSPI_CHCTRL0);
+		chctrl0 &= ~OMAP2_MCSPI_CHCTRL_EXTCLK_MASK;
+		chctrl0 |= extclk << 8;
+		mcspi_wr_cs_reg(spim,
+			slave->chip_select, OMAP2_MCSPI_CHCTRL0, chctrl0);
+	}
+
+	if (spim->pin_dir == MCSPI_PINDIR_D0_IN_D1_OUT) {
+		l &= ~OMAP2_MCSPI_CHCONF_IS;
+		l &= ~OMAP2_MCSPI_CHCONF_DPE1;
+		l |= OMAP2_MCSPI_CHCONF_DPE0;
+	} else {
+		l |= OMAP2_MCSPI_CHCONF_IS;
+		l |= OMAP2_MCSPI_CHCONF_DPE1;
+		l &= ~OMAP2_MCSPI_CHCONF_DPE0;
+	}
+
+	/* wordlength */
+	word_len = config->bits_per_word;
+	/* TODO: allow word_len != 8 */
+	if (word_len != 8) {
+		dev_err(slave_to_kdev(slave), "word_len(%d) != 8.\n",
+				word_len);
+		return -EIO;
+	}
+	l &= ~OMAP2_MCSPI_CHCONF_WL_MASK;
+	l |= (word_len - 1) << 7;
+
+	/* set chipselect polarity; manage with FORCE */
+	if (!(config->mode & SPI_CS_HIGH))
+		/* CS active-low */
+		l |= OMAP2_MCSPI_CHCONF_EPOL;
+	else
+		l &= ~OMAP2_MCSPI_CHCONF_EPOL;
+
+	/* set SPI mode 0..3 */
+	if (config->mode & SPI_CPOL)
+		l |= OMAP2_MCSPI_CHCONF_POL;
+	else
+		l &= ~OMAP2_MCSPI_CHCONF_POL;
+	if (config->mode & SPI_CPHA)
+		l |= OMAP2_MCSPI_CHCONF_PHA;
+	else
+		l &= ~OMAP2_MCSPI_CHCONF_PHA;
+
+	mcspi_wr_cs_reg(spim, slave->chip_select, OMAP2_MCSPI_CHCONF0, l);
+	l = mcspi_rd_cs_reg(spim, slave->chip_select, OMAP2_MCSPI_CHCONF0);
+
+	omap2_mcspi_chip_select(slave, 0);
+
+	return 0;
+}
+
+static void mcspi_rd_fifo(struct spi_master_omap2_mcspi *spim, int cs_id)
+{
+	u8 byte;
+	int i;
+
+	/* Receiver register must be read to remove source of interrupt */
+	for (i = 0; i < spim->fifo_depth; i++) {
+		byte = mcspi_rd_cs_reg(spim, cs_id, OMAP2_MCSPI_RX0);
+		if (spim->rx_buf && (spim->rx_len > 0))
+			*spim->rx_buf++ = byte;
+		spim->rx_len--;
+	}
+}
+
+static void mcspi_wr_fifo(struct spi_master_omap2_mcspi *spim, int cs_id)
+{
+	u8 byte;
+	int i;
+
+	/* load transmitter register to remove the source of the interrupt */
+	for (i = 0; i < spim->fifo_depth; i++) {
+		if (spim->tx_len <= 0)
+			byte = 0;
+		else
+			byte = spim->tx_buf ? *spim->tx_buf++ : 0;
+		mcspi_wr_cs_reg(spim, cs_id, OMAP2_MCSPI_TX0, byte);
+		spim->tx_len--;
+	}
+}
+
+static void mcspi_wr_fifo_bh(struct spi_master_omap2_mcspi *spim, int cs_id)
+{
+	u8 byte;
+	int i;
+	rtdm_lockctx_t c;
+
+	rtdm_lock_get_irqsave(&spim->lock, c);
+
+	for (i = 0; i < spim->fifo_depth; i++) {
+		if (spim->tx_len <= 0)
+			byte = 0;
+		else
+			byte = spim->tx_buf ? *spim->tx_buf++ : 0;
+		mcspi_wr_cs_reg(spim, cs_id, OMAP2_MCSPI_TX0, byte);
+		spim->tx_len--;
+	}
+
+	rtdm_lock_put_irqrestore(&spim->lock, c);
+}
+
+static int omap2_mcspi_interrupt(rtdm_irq_t *irqh)
+{
+	struct spi_master_omap2_mcspi *spim;
+	u32 l;
+	int i, cs_id = 0;
+
+	spim = rtdm_irq_get_arg(irqh, struct spi_master_omap2_mcspi);
+	rtdm_lock_get(&spim->lock);
+
+	for (i = 0; i < OMAP2_MCSPI_CS_N; i++)
+		if (spim->cs[i].chosen) {
+			cs_id = i;
+			break;
+		}
+
+	spim->n_interrupts++;
+	l = mcspi_rd_reg(spim, OMAP2_MCSPI_IRQSTATUS);
+
+	if ((l & OMAP2_MCSPI_IRQSTATUS_RX0_FULL) ||
+	   (l & OMAP2_MCSPI_IRQSTATUS_RX1_FULL)) {
+		mcspi_rd_fifo(spim, cs_id);
+		spim->n_rx_full++;
+	}
+	if ((l & OMAP2_MCSPI_IRQSTATUS_TX0_EMPTY) ||
+		(l & OMAP2_MCSPI_IRQSTATUS_TX1_EMPTY)) {
+		if (spim->tx_len > 0)
+			mcspi_wr_fifo(spim, cs_id);
+		spim->n_tx_empty++;
+	}
+
+	/* write 1 to OMAP2_MCSPI_IRQSTATUS field to reset it */
+	mcspi_wr_reg(spim, OMAP2_MCSPI_IRQSTATUS, l);
+
+	if ((spim->tx_len <= 0) && (spim->rx_len <= 0)) {
+		/* disable interrupts */
+		mcspi_wr_reg(spim, OMAP2_MCSPI_IRQENABLE, 0);
+
+		rtdm_event_signal(&spim->transfer_done);
+	}
+
+	rtdm_lock_put(&spim->lock);
+
+	return RTDM_IRQ_HANDLED;
+}
+
+static int omap2_mcspi_disable_fifo(struct rtdm_spi_remote_slave *slave,
+							int cs_id)
+{
+	struct spi_master_omap2_mcspi *spim = to_master_omap2_mcspi(slave);
+	u32 chconf;
+
+	chconf = mcspi_rd_cs_reg(spim, cs_id, OMAP2_MCSPI_CHCONF0);
+	chconf &= ~(OMAP2_MCSPI_CHCONF_FFER | OMAP2_MCSPI_CHCONF_FFET);
+	mcspi_wr_cs_reg(spim, cs_id, OMAP2_MCSPI_CHCONF0, chconf);
+	return 0;
+}
+
+static int omap2_mcspi_set_fifo(struct rtdm_spi_remote_slave *slave)
+{
+	struct spi_master_omap2_mcspi *spim = to_master_omap2_mcspi(slave);
+	unsigned int wcnt;
+	int max_fifo_depth, fifo_depth, bytes_per_word;
+	u32 chconf, xferlevel;
+
+	chconf = mcspi_rd_cs_reg(spim, slave->chip_select, OMAP2_MCSPI_CHCONF0);
+	bytes_per_word = 1;
+
+	max_fifo_depth = OMAP2_MCSPI_MAX_FIFODEPTH / 2;
+	if (spim->tx_len < max_fifo_depth) {
+		fifo_depth = spim->tx_len;
+		wcnt = spim->tx_len / bytes_per_word;
+	} else {
+		fifo_depth = max_fifo_depth;
+		wcnt = max_fifo_depth * (spim->tx_len / max_fifo_depth)
+			/ bytes_per_word;
+	}
+	if (wcnt > OMAP2_MCSPI_MAX_FIFOWCNT) {
+		dev_err(slave_to_kdev(slave),
+			"%s: wcnt=%d: too many bytes in a transfer.\n",
+			__func__, wcnt);
+		return -EINVAL;
+	}
+
+	chconf |= OMAP2_MCSPI_CHCONF_FFER;
+	chconf |= OMAP2_MCSPI_CHCONF_FFET;
+
+	mcspi_wr_cs_reg(spim, slave->chip_select, OMAP2_MCSPI_CHCONF0, chconf);
+	spim->fifo_depth = fifo_depth;
+
+	xferlevel = wcnt << 16;
+	xferlevel |= (fifo_depth - 1) << 8;
+	xferlevel |= fifo_depth - 1;
+	mcspi_wr_reg(spim, OMAP2_MCSPI_XFERLEVEL, xferlevel);
+
+	return 0;
+}
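+
+/*
+ * Worked example (editorial note): a 10-byte transfer with
+ * bytes_per_word = 1 yields fifo_depth = 10 and wcnt = 10, so
+ * xferlevel = (10 << 16) | (9 << 8) | 9. A 100-byte transfer yields
+ * fifo_depth = 32 and wcnt = 32 * (100 / 32) = 96, covering only the
+ * FIFO-aligned part; do_transfer_irq() below handles the remaining
+ * 100 % 32 = 4 bytes in a second pass.
+ */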
+
+static int do_transfer_irq_bh(struct rtdm_spi_remote_slave *slave)
+{
+	struct spi_master_omap2_mcspi *spim = to_master_omap2_mcspi(slave);
+	u32 chconf, l;
+	int ret;
+	int i;
+
+	/* configure to send and receive */
+	chconf = mcspi_rd_cs_reg(spim, slave->chip_select, OMAP2_MCSPI_CHCONF0);
+	chconf &= ~OMAP2_MCSPI_CHCONF_TRM_MASK;
+	chconf &= ~OMAP2_MCSPI_CHCONF_TURBO;
+	mcspi_wr_cs_reg(spim, slave->chip_select, OMAP2_MCSPI_CHCONF0, chconf);
+
+	/* fifo can be enabled on a single channel */
+	if (slave->chip_select == 0) {
+		if (spim->cs[1].chosen)
+			omap2_mcspi_disable_fifo(slave, 1);
+	} else {
+		if (spim->cs[0].chosen)
+			omap2_mcspi_disable_fifo(slave, 0);
+	}
+	ret = omap2_mcspi_set_fifo(slave);
+	if (ret)
+		return ret;
+
+	omap2_mcspi_channel_enable(slave, 1);
+
+	/* Set slave->chip_select as chosen */
+	for (i = 0; i < OMAP2_MCSPI_CS_N; i++)
+		if (i == slave->chip_select)
+			spim->cs[i].chosen = 1;
+		else
+			spim->cs[i].chosen = 0;
+
+	/* The interrupt status bit should always be reset
+	 * after the channel is enabled
+	 * and before the event is enabled as an interrupt source.
+	 */
+	/* write 1 to OMAP2_MCSPI_IRQSTATUS field to reset it */
+	l = mcspi_rd_reg(spim, OMAP2_MCSPI_IRQSTATUS);
+	mcspi_wr_reg(spim, OMAP2_MCSPI_IRQSTATUS, l);
+
+	spim->n_interrupts = 0;
+	spim->n_rx_full = 0;
+	spim->n_tx_empty = 0;
+
+	/* Enable interrupts last. */
+	/* support only two channels */
+	if (slave->chip_select == 0)
+		l = OMAP2_MCSPI_IRQENABLE_TX0_EMPTY |
+			OMAP2_MCSPI_IRQENABLE_RX0_FULL;
+	else
+		l = OMAP2_MCSPI_IRQENABLE_TX1_EMPTY |
+			OMAP2_MCSPI_IRQENABLE_RX1_FULL;
+	mcspi_wr_reg(spim, OMAP2_MCSPI_IRQENABLE, l);
+
+	/* TX_EMPTY will be raised only after data is transferred */
+	mcspi_wr_fifo_bh(spim, slave->chip_select);
+
+	/* wait for transfer completion */
+	ret = rtdm_event_wait(&spim->transfer_done);
+	omap2_mcspi_channel_enable(slave, 0);
+	if (ret)
+		return ret;
+
+	/* spim->tx_len and spim->rx_len should be 0 */
+	if (spim->tx_len || spim->rx_len)
+		return -EIO;
+	return 0;
+}
+
+static int do_transfer_irq(struct rtdm_spi_remote_slave *slave)
+{
+	struct spi_master_omap2_mcspi *spim = to_master_omap2_mcspi(slave);
+	int len, first_size, last_size, ret = 0;
+
+	len = spim->tx_len;
+
+	if (len < (OMAP2_MCSPI_MAX_FIFODEPTH / 2))
+		goto label_last;
+
+	first_size = (OMAP2_MCSPI_MAX_FIFODEPTH / 2) *
+		(len / (OMAP2_MCSPI_MAX_FIFODEPTH / 2));
+	spim->tx_len = first_size;
+	spim->rx_len = first_size;
+	ret = do_transfer_irq_bh(slave);
+	if (ret)
+		return ret;
+
+label_last:
+	last_size = len % (OMAP2_MCSPI_MAX_FIFODEPTH / 2);
+	if (last_size == 0)
+		return ret;
+	spim->tx_len = last_size;
+	spim->rx_len = last_size;
+	ret = do_transfer_irq_bh(slave);
+	return ret;
+}
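+
+/*
+ * Worked example (editorial note): with a half-FIFO size of
+ * 64 / 2 = 32 bytes, a 70-byte transfer runs as one interrupt-driven
+ * pass of 32 * (70 / 32) = 64 bytes followed by a second pass of
+ * 70 % 32 = 6 bytes, so only the last pass may be shorter than the
+ * FIFO trigger level.
+ */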
+
+static int omap2_mcspi_transfer_iobufs(struct rtdm_spi_remote_slave *slave)
+{
+	struct spi_master_omap2_mcspi *spim = to_master_omap2_mcspi(slave);
+	struct spi_slave_omap2_mcspi *mapped_data = to_slave_omap2_mcspi(slave);
+	int ret;
+
+	if (mapped_data->io_len == 0)
+		return -EINVAL;	/* No I/O buffers set. */
+
+	spim->tx_len = mapped_data->io_len / 2;
+	spim->rx_len = spim->tx_len;
+	spim->tx_buf = mapped_data->io_virt + spim->rx_len;
+	spim->rx_buf = mapped_data->io_virt;
+
+	ret = do_transfer_irq(slave);
+
+	return ret ? : 0;
+}
+
+static int omap2_mcspi_transfer_iobufs_n(struct rtdm_spi_remote_slave *slave,
+								 int len)
+{
+	struct spi_master_omap2_mcspi *spim = to_master_omap2_mcspi(slave);
+	struct spi_slave_omap2_mcspi *mapped_data = to_slave_omap2_mcspi(slave);
+	int ret;
+
+	if ((mapped_data->io_len == 0) ||
+		(len <= 0) || (len > (mapped_data->io_len / 2)))
+		return -EINVAL;
+
+	spim->tx_len = len;
+	spim->rx_len = len;
+	spim->tx_buf = mapped_data->io_virt + mapped_data->io_len / 2;
+	spim->rx_buf = mapped_data->io_virt;
+
+	ret = do_transfer_irq(slave);
+
+	return ret ? : 0;
+}
+
+static ssize_t omap2_mcspi_read(struct rtdm_spi_remote_slave *slave,
+			    void *rx, size_t len)
+{
+	struct spi_master_omap2_mcspi *spim = to_master_omap2_mcspi(slave);
+	int ret;
+
+	spim->tx_len = len;
+	spim->rx_len = len;
+	spim->tx_buf = NULL;
+	spim->rx_buf = rx;
+
+	ret = do_transfer_irq(slave);
+
+	return ret ? : len;
+}
+
+static ssize_t omap2_mcspi_write(struct rtdm_spi_remote_slave *slave,
+			     const void *tx, size_t len)
+{
+	struct spi_master_omap2_mcspi *spim = to_master_omap2_mcspi(slave);
+	int ret;
+
+	spim->tx_len = len;
+	spim->rx_len = len;
+	spim->tx_buf = tx;
+	spim->rx_buf = NULL;
+
+	ret = do_transfer_irq(slave);
+
+	return ret ? : len;
+}
+
+static int set_iobufs(struct spi_slave_omap2_mcspi *mapped_data, size_t len)
+{
+	dma_addr_t dma;
+	void *p;
+
+	if (len == 0)
+		return -EINVAL;
+
+	len = L1_CACHE_ALIGN(len) * 2;
+	if (len == mapped_data->io_len)
+		return 0;
+
+	if (mapped_data->io_len)
+		return -EINVAL;	/* I/O buffers may not be resized. */
+
+	/*
+	 * Since we need the I/O buffers to be set for starting a
+	 * transfer, there is no need for serializing this routine and
+	 * transfer_iobufs(), provided io_len is set last.
+	 *
+	 * NOTE: We don't need coherent memory until we actually get
+	 * DMA transfers working, this code is a bit ahead of
+	 * schedule.
+	 *
+	 * Revisit: this assumes DMA mask is 4Gb.
+	 */
+	p = dma_alloc_coherent(NULL, len, &dma, GFP_KERNEL);
+	if (p == NULL)
+		return -ENOMEM;
+
+	mapped_data->io_dma = dma;
+	mapped_data->io_virt = p;
+	/*
+	 * May race with transfer_iobufs(), must be assigned after all
+	 * the rest is set up, enforcing a membar.
+	 */
+	smp_mb();
+	mapped_data->io_len = len;
+
+	return 0;
+}
+
+static int omap2_mcspi_set_iobufs(struct rtdm_spi_remote_slave *slave,
+			      struct rtdm_spi_iobufs *p)
+{
+	struct spi_slave_omap2_mcspi *mapped_data = to_slave_omap2_mcspi(slave);
+	int ret;
+
+	ret = set_iobufs(mapped_data, p->io_len);
+	if (ret)
+		return ret;
+
+	p->i_offset = 0;
+	p->o_offset = mapped_data->io_len / 2;
+	p->map_len = mapped_data->io_len;
+
+	return 0;
+}
+
+static int omap2_mcspi_mmap_iobufs(struct rtdm_spi_remote_slave *slave,
+			       struct vm_area_struct *vma)
+{
+	struct spi_slave_omap2_mcspi *mapped_data = to_slave_omap2_mcspi(slave);
+
+	/*
+	 * dma_alloc_coherent() delivers non-cached memory, make sure
+	 * to return consistent mapping attributes. Typically, mixing
+	 * memory attributes across address spaces referring to the
+	 * same physical area is architecturally wrong on ARM.
+	 */
+	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
+
+	return rtdm_mmap_kmem(vma, mapped_data->io_virt);
+}
+
+static void omap2_mcspi_mmap_release(struct rtdm_spi_remote_slave *slave)
+{
+	struct spi_slave_omap2_mcspi *mapped_data = to_slave_omap2_mcspi(slave);
+
+	dma_free_coherent(NULL, mapped_data->io_len,
+			  mapped_data->io_virt, mapped_data->io_dma);
+	mapped_data->io_len = 0;
+}
+
+static struct rtdm_spi_remote_slave *
+omap2_mcspi_attach_slave(struct rtdm_spi_master *master, struct spi_device *spi)
+{
+	struct spi_master_omap2_mcspi *spim;
+	struct spi_slave_omap2_mcspi *mapped_data;
+	int ret;
+
+	if ((spi->chip_select >= OMAP2_MCSPI_CS_N) || (OMAP2_MCSPI_CS_N > 2)) {
+		/* Error in the case of native CS requested with CS > 1 */
+		dev_err(&spi->dev, "%s: only two native CS per spi module are supported\n",
+			__func__);
+		return ERR_PTR(-EINVAL);
+	}
+
+	mapped_data = kzalloc(sizeof(*mapped_data), GFP_KERNEL);
+	if (mapped_data == NULL)
+		return ERR_PTR(-ENOMEM);
+
+	ret = rtdm_spi_add_remote_slave(&mapped_data->slave, master, spi);
+	if (ret) {
+		dev_err(&spi->dev, "%s: failed to attach slave\n", __func__);
+		kfree(mapped_data);
+		return ERR_PTR(ret);
+	}
+
+	spim = container_of(master, struct spi_master_omap2_mcspi, master);
+	spim->cs[spi->chip_select].chosen = 0;
+	spim->cs[spi->chip_select].regs = spim->regs +
+		spi->chip_select * OMAP2_MCSPI_CHANNELBANK_SIZE;
+	spim->cs[spi->chip_select].phys = spim->phys +
+		spi->chip_select * OMAP2_MCSPI_CHANNELBANK_SIZE;
+
+	return &mapped_data->slave;
+}
+
+static void omap2_mcspi_detach_slave(struct rtdm_spi_remote_slave *slave)
+{
+	struct spi_slave_omap2_mcspi *mapped_data = to_slave_omap2_mcspi(slave);
+
+	rtdm_spi_remove_remote_slave(slave);
+
+	kfree(mapped_data);
+}
+
+static struct rtdm_spi_master_ops omap2_mcspi_master_ops = {
+	.configure = omap2_mcspi_configure,
+	.chip_select = omap2_mcspi_chip_select,
+	.set_iobufs = omap2_mcspi_set_iobufs,
+	.mmap_iobufs = omap2_mcspi_mmap_iobufs,
+	.mmap_release = omap2_mcspi_mmap_release,
+	.transfer_iobufs = omap2_mcspi_transfer_iobufs,
+	.transfer_iobufs_n = omap2_mcspi_transfer_iobufs_n,
+	.write = omap2_mcspi_write,
+	.read = omap2_mcspi_read,
+	.attach_slave = omap2_mcspi_attach_slave,
+	.detach_slave = omap2_mcspi_detach_slave,
+};
+
+static struct omap2_mcspi_platform_config omap2_pdata = {
+	.regs_offset = 0,
+};
+
+static struct omap2_mcspi_platform_config omap4_pdata = {
+	.regs_offset = OMAP4_MCSPI_REG_OFFSET,
+};
+
+static const struct of_device_id omap_mcspi_of_match[] = {
+	{
+		.compatible = "ti,omap2-mcspi",
+		.data = &omap2_pdata,
+	},
+	{
+		/* beaglebone black */
+		.compatible = "ti,omap4-mcspi",
+		.data = &omap4_pdata,
+	},
+	{ /* Sentinel */ },
+};
+MODULE_DEVICE_TABLE(of, omap_mcspi_of_match);
+
+static int omap2_mcspi_probe(struct platform_device *pdev)
+{
+	struct spi_master_omap2_mcspi *spim;
+	struct rtdm_spi_master *master;
+	struct spi_master *kmaster;
+	struct resource *r;
+	int ret, irq;
+	u32 regs_offset = 0;
+	const struct omap2_mcspi_platform_config *pdata;
+	const struct of_device_id *match;
+	u32 num_cs = 1;
+	unsigned int pin_dir = MCSPI_PINDIR_D0_IN_D1_OUT;
+
+	match = of_match_device(omap_mcspi_of_match, &pdev->dev);
+	if (match) {
+		pdata = match->data;
+		regs_offset = pdata->regs_offset;
+	} else {
+		dev_err(&pdev->dev, "%s: cannot find a match with device tree\n"
+				"of '%s' or '%s'",
+				__func__,
+				omap_mcspi_of_match[0].compatible,
+				omap_mcspi_of_match[1].compatible);
+		return -ENOENT;
+	}
+
+	master = rtdm_spi_alloc_master(&pdev->dev,
+		   struct spi_master_omap2_mcspi, master);
+	if (master == NULL)
+		return -ENOMEM;
+
+	master->subclass = RTDM_SUBCLASS_OMAP2_MCSPI;
+	master->ops = &omap2_mcspi_master_ops;
+	platform_set_drvdata(pdev, master);
+
+	kmaster = master->kmaster;
+	/* flags understood by this controller driver */
+	kmaster->mode_bits = OMAP2_MCSPI_SPI_MODE_BITS;
+	/* TODO: SPI_BPW_RANGE_MASK(4, 32); */
+	kmaster->bits_per_word_mask = SPI_BPW_MASK(8);
+	of_property_read_u32(pdev->dev.of_node, "ti,spi-num-cs", &num_cs);
+	kmaster->num_chipselect = num_cs;
+	if (of_get_property(pdev->dev.of_node,
+		"ti,pindir-d0-out-d1-in", NULL)) {
+		pin_dir = MCSPI_PINDIR_D0_OUT_D1_IN;
+	}
+
+	kmaster->max_speed_hz = OMAP2_MCSPI_MAX_FREQ;
+	kmaster->min_speed_hz = OMAP2_MCSPI_MAX_FREQ >> 15;
+	kmaster->dev.of_node = pdev->dev.of_node;
+
+	spim = container_of(master, struct spi_master_omap2_mcspi, master);
+	rtdm_event_init(&spim->transfer_done, 0);
+	rtdm_lock_init(&spim->lock);
+
+	spim->pin_dir = pin_dir;
+	r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	spim->regs = devm_ioremap_resource(&pdev->dev, r);
+	if (IS_ERR(spim->regs)) {
+		dev_err(&pdev->dev, "%s: cannot map I/O memory\n", __func__);
+		ret = PTR_ERR(spim->regs);
+		goto fail;
+	}
+	spim->phys = r->start + regs_offset;
+	spim->regs += regs_offset;
+
+	irq = irq_of_parse_and_map(pdev->dev.of_node, 0);
+	if (irq <= 0) {
+		ret = irq ?: -ENODEV;
+		dev_err(&pdev->dev, "%s: irq_of_parse_and_map: %d\n",
+				__func__, irq);
+		goto fail;
+	}
+
+	ret = rtdm_irq_request(&spim->irqh, irq,
+			       omap2_mcspi_interrupt, 0,
+			       dev_name(&pdev->dev), spim);
+	if (ret) {
+		dev_err(&pdev->dev, "%s: cannot request IRQ%d\n",
+				__func__, irq);
+		goto fail_unclk;
+	}
+
+	ret = rtdm_spi_add_master(&spim->master);
+	if (ret) {
+		dev_err(&pdev->dev, "%s: failed to add master\n", __func__);
+		goto fail_unclk;
+	}
+
+	pm_runtime_use_autosuspend(&pdev->dev);
+	/* if delay is negative and the use_autosuspend flag is set
+	 * then runtime suspends are prevented.
+	 */
+	pm_runtime_set_autosuspend_delay(&pdev->dev, PM_NEGATIVE_DELAY);
+	pm_runtime_enable(&pdev->dev);
+	ret = pm_runtime_get_sync(&pdev->dev);
+	if (ret < 0) {
+		dev_err(&pdev->dev, "%s: pm_runtime_get_sync error %d\n",
+				__func__, ret);
+		return ret;
+	}
+
+	omap2_mcspi_reset_hw(spim);
+	omap2_mcspi_init_hw(spim);
+
+	dev_info(&pdev->dev, "success\n");
+	return 0;
+
+fail_unclk:
+fail:
+	spi_master_put(kmaster);
+
+	return ret;
+}
+
+static int omap2_mcspi_remove(struct platform_device *pdev)
+{
+	struct rtdm_spi_master *master = platform_get_drvdata(pdev);
+	struct spi_master_omap2_mcspi *spim;
+
+	spim = container_of(master, struct spi_master_omap2_mcspi, master);
+
+	omap2_mcspi_reset_hw(spim);
+
+	pm_runtime_dont_use_autosuspend(&pdev->dev);
+	pm_runtime_put_sync(&pdev->dev);
+	pm_runtime_disable(&pdev->dev);
+
+	rtdm_irq_free(&spim->irqh);
+
+	rtdm_spi_remove_master(master);
+
+	return 0;
+}
+
+static struct platform_driver omap2_mcspi_spi_driver = {
+	.driver		= {
+		.name		= "omap2_mcspi_rt",
+		.of_match_table	= omap_mcspi_of_match,
+	},
+	.probe		= omap2_mcspi_probe,
+	.remove		= omap2_mcspi_remove,
+};
+module_platform_driver(omap2_mcspi_spi_driver);
+
+MODULE_LICENSE("GPL");
+++ linux-patched/drivers/xenomai/spi/Makefile	2022-03-21 12:58:31.458868786 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/drivers/xenomai/spi/spi-master.h	1970-01-01 01:00:00.000000000 +0100
+
+ccflags-$(CONFIG_XENO_DRIVERS_SPI_DEBUG) := -DDEBUG
+
+obj-$(CONFIG_XENO_DRIVERS_SPI) += xeno_spi.o
+
+xeno_spi-y := spi-master.o spi-device.o
+
+obj-$(CONFIG_XENO_DRIVERS_SPI_BCM2835) += xeno_spi_bcm2835.o
+obj-$(CONFIG_XENO_DRIVERS_SPI_SUN6I) += xeno_spi_sun6i.o
+obj-$(CONFIG_XENO_DRIVERS_SPI_OMAP2_MCSPI_RT) += xeno_spi_omap2_mcspi_rt.o
+
+xeno_spi_bcm2835-y := spi-bcm2835.o
+xeno_spi_sun6i-y := spi-sun6i.o
+xeno_spi_omap2_mcspi_rt-y := spi-omap2-mcspi-rt.o
+++ linux-patched/drivers/xenomai/spi/spi-master.h	2022-03-21 12:58:31.451868854 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/drivers/xenomai/spi/spi-master.c	1970-01-01 01:00:00.000000000 +0100
+/**
+ * @note Copyright (C) 2016 Philippe Gerum <rpm@xenomai.org>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+#ifndef _RTDM_SPI_MASTER_H
+#define _RTDM_SPI_MASTER_H
+
+#include <rtdm/driver.h>
+#include <rtdm/uapi/spi.h>
+#include "spi-device.h"
+
+struct class;
+struct device_node;
+struct rtdm_spi_master;
+struct spi_master;
+
+struct rtdm_spi_master_ops {
+	int (*open)(struct rtdm_spi_remote_slave *slave);
+	void (*close)(struct rtdm_spi_remote_slave *slave);
+	int (*configure)(struct rtdm_spi_remote_slave *slave);
+	void (*chip_select)(struct rtdm_spi_remote_slave *slave,
+			    bool active);
+	int (*set_iobufs)(struct rtdm_spi_remote_slave *slave,
+			  struct rtdm_spi_iobufs *p);
+	int (*mmap_iobufs)(struct rtdm_spi_remote_slave *slave,
+			   struct vm_area_struct *vma);
+	void (*mmap_release)(struct rtdm_spi_remote_slave *slave);
+	int (*transfer_iobufs)(struct rtdm_spi_remote_slave *slave);
+	int (*transfer_iobufs_n)(struct rtdm_spi_remote_slave *slave, int len);
+	ssize_t (*write)(struct rtdm_spi_remote_slave *slave,
+			 const void *tx, size_t len);
+	ssize_t (*read)(struct rtdm_spi_remote_slave *slave,
+			 void *rx, size_t len);
+	struct rtdm_spi_remote_slave *(*attach_slave)
+		(struct rtdm_spi_master *master,
+			struct spi_device *spi);
+	void (*detach_slave)(struct rtdm_spi_remote_slave *slave);
+};
+
+struct rtdm_spi_master {
+	int subclass;
+	const struct rtdm_spi_master_ops *ops;
+	struct spi_master *kmaster;
+	struct {	/* Internal */
+		struct rtdm_driver driver;
+		struct class *devclass;
+		char *classname;
+		struct list_head slaves;
+		struct list_head next;
+		rtdm_lock_t lock;
+		rtdm_mutex_t bus_lock;
+		struct rtdm_spi_remote_slave *cs;
+	};
+};
+
+#define rtdm_spi_alloc_master(__dev, __type, __mptr)			\
+	__rtdm_spi_alloc_master(__dev, sizeof(__type),			\
+				offsetof(__type, __mptr))		\
+
+struct rtdm_spi_master *
+__rtdm_spi_alloc_master(struct device *dev, size_t size, int off);
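+
+/*
+ * Typical use in a controller driver (editorial sketch; "my_mcspi",
+ * "MY_SUBCLASS" and "my_master_ops" are hypothetical names): the
+ * macro allocates the Linux spi_master together with the
+ * driver-private state and returns the embedded RTDM master.
+ *
+ *	struct my_mcspi {
+ *		struct rtdm_spi_master master;
+ *		void __iomem *regs;
+ *	};
+ *
+ *	m = rtdm_spi_alloc_master(&pdev->dev, struct my_mcspi, master);
+ *	m->subclass = MY_SUBCLASS;
+ *	m->ops = &my_master_ops;
+ *	ret = rtdm_spi_add_master(m);
+ */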
+
+int __rtdm_spi_setup_driver(struct rtdm_spi_master *master);
+
+int rtdm_spi_add_master(struct rtdm_spi_master *master);
+
+void rtdm_spi_remove_master(struct rtdm_spi_master *master);
+
+#endif /* !_RTDM_SPI_MASTER_H */
+++ linux-patched/drivers/xenomai/spi/spi-master.c	2022-03-21 12:58:31.443868932 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/drivers/xenomai/spi/spi-sun6i.c	1970-01-01 01:00:00.000000000 +0100
+/**
+ * @note Copyright (C) 2016 Philippe Gerum <rpm@xenomai.org>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/device.h>
+#include <linux/slab.h>
+#include <linux/mutex.h>
+#include <linux/err.h>
+#include <linux/spi/spi.h>
+#include <linux/gpio.h>
+#include "spi-master.h"
+
+static inline
+struct device *to_kdev(struct rtdm_spi_remote_slave *slave)
+{
+	return rtdm_dev_to_kdev(&slave->dev);
+}
+
+static inline struct rtdm_spi_remote_slave *fd_to_slave(struct rtdm_fd *fd)
+{
+	struct rtdm_device *dev = rtdm_fd_device(fd);
+
+	return container_of(dev, struct rtdm_spi_remote_slave, dev);
+}
+
+static int update_slave_config(struct rtdm_spi_remote_slave *slave,
+			       struct rtdm_spi_config *config)
+{
+	struct rtdm_spi_config old_config;
+	struct rtdm_spi_master *master = slave->master;
+	int ret;
+
+	rtdm_mutex_lock(&master->bus_lock);
+
+	old_config = slave->config;
+	slave->config = *config;
+	ret = slave->master->ops->configure(slave);
+	if (ret) {
+		slave->config = old_config;
+		rtdm_mutex_unlock(&master->bus_lock);
+		return ret;
+	}
+
+	rtdm_mutex_unlock(&master->bus_lock);
+	
+	dev_info(to_kdev(slave),
+		 "configured mode %d, %s%s%s%s%u bits/w, %u Hz max\n",
+		 (int) (slave->config.mode & (SPI_CPOL | SPI_CPHA)),
+		 (slave->config.mode & SPI_CS_HIGH) ? "cs_high, " : "",
+		 (slave->config.mode & SPI_LSB_FIRST) ? "lsb, " : "",
+		 (slave->config.mode & SPI_3WIRE) ? "3wire, " : "",
+		 (slave->config.mode & SPI_LOOP) ? "loopback, " : "",
+		 slave->config.bits_per_word,
+		 slave->config.speed_hz);
+	
+	return 0;
+}
+
+static int spi_master_open(struct rtdm_fd *fd, int oflags)
+{
+	struct rtdm_spi_remote_slave *slave = fd_to_slave(fd);
+	struct rtdm_spi_master *master = slave->master;
+
+	if (master->ops->open)
+		return master->ops->open(slave);
+		
+	return 0;
+}
+
+static void spi_master_close(struct rtdm_fd *fd)
+{
+	struct rtdm_spi_remote_slave *slave = fd_to_slave(fd);
+	struct rtdm_spi_master *master = slave->master;
+	rtdm_lockctx_t c;
+
+	rtdm_lock_get_irqsave(&master->lock, c);
+
+	if (master->cs == slave)
+		master->cs = NULL;
+
+	rtdm_lock_put_irqrestore(&master->lock, c);
+
+	if (master->ops->close)
+		master->ops->close(slave);
+}
+
+static int do_chip_select(struct rtdm_spi_remote_slave *slave)
+{				/* master->bus_lock held */
+	struct rtdm_spi_master *master = slave->master;
+	rtdm_lockctx_t c;
+	int state;
+
+	if (slave->config.speed_hz == 0)
+		return -EINVAL; /* Setup is missing. */
+
+	/* Serialize with spi_master_close() */
+	rtdm_lock_get_irqsave(&master->lock, c);
+	
+	if (master->cs != slave) {
+		if (gpio_is_valid(slave->cs_gpio)) {
+			state = !!(slave->config.mode & SPI_CS_HIGH);
+			gpiod_set_raw_value(slave->cs_gpiod, state);
+		} else
+			master->ops->chip_select(slave, true);
+		master->cs = slave;
+	}
+
+	rtdm_lock_put_irqrestore(&master->lock, c);
+
+	return 0;
+}
+
+static void do_chip_deselect(struct rtdm_spi_remote_slave *slave)
+{				/* master->bus_lock held */
+	struct rtdm_spi_master *master = slave->master;
+	rtdm_lockctx_t c;
+	int state;
+
+	rtdm_lock_get_irqsave(&master->lock, c);
+
+	if (gpio_is_valid(slave->cs_gpio)) {
+		state = !(slave->config.mode & SPI_CS_HIGH);
+		gpiod_set_raw_value(slave->cs_gpiod, state);
+	} else
+		master->ops->chip_select(slave, false);
+
+	master->cs = NULL;
+
+	rtdm_lock_put_irqrestore(&master->lock, c);
+}
+
+static int spi_master_ioctl_rt(struct rtdm_fd *fd,
+			       unsigned int request, void *arg)
+{
+	struct rtdm_spi_remote_slave *slave = fd_to_slave(fd);
+	struct rtdm_spi_master *master = slave->master;
+	struct rtdm_spi_config config;
+	int ret, len;
+
+	switch (request) {
+	case SPI_RTIOC_SET_CONFIG:
+		ret = rtdm_safe_copy_from_user(fd, &config,
+					       arg, sizeof(config));
+		if (ret == 0)
+			ret = update_slave_config(slave, &config);
+		break;
+	case SPI_RTIOC_GET_CONFIG:
+		rtdm_mutex_lock(&master->bus_lock);
+		config = slave->config;
+		rtdm_mutex_unlock(&master->bus_lock);
+		ret = rtdm_safe_copy_to_user(fd, arg,
+					     &config, sizeof(config));
+		break;
+	case SPI_RTIOC_TRANSFER:
+		ret = -EINVAL;
+		if (master->ops->transfer_iobufs) {
+			rtdm_mutex_lock(&master->bus_lock);
+			ret = do_chip_select(slave);
+			if (ret == 0) {
+				ret = master->ops->transfer_iobufs(slave);
+				do_chip_deselect(slave);
+			}
+			rtdm_mutex_unlock(&master->bus_lock);
+		}
+		break;
+	case SPI_RTIOC_TRANSFER_N:
+		ret = -EINVAL;
+		if (master->ops->transfer_iobufs_n) {
+			len = (long)arg;
+			rtdm_mutex_lock(&master->bus_lock);
+			ret = do_chip_select(slave);
+			if (ret == 0) {
+				ret = master->ops->transfer_iobufs_n(slave, len);
+				do_chip_deselect(slave);
+			}
+			rtdm_mutex_unlock(&master->bus_lock);
+		}
+		break;
+	default:
+		ret = -ENOSYS;
+	}
+
+	return ret;
+}
+
+static int spi_master_ioctl_nrt(struct rtdm_fd *fd,
+				unsigned int request, void *arg)
+{
+	struct rtdm_spi_remote_slave *slave = fd_to_slave(fd);
+	struct rtdm_spi_master *master = slave->master;
+	struct rtdm_spi_iobufs iobufs;
+	int ret;
+
+	switch (request) {
+	case SPI_RTIOC_SET_IOBUFS:
+		ret = rtdm_safe_copy_from_user(fd, &iobufs,
+					       arg, sizeof(iobufs));
+		if (ret)
+			break;
+		/*
+		 * No transfer can happen without I/O buffers being
+		 * set, and I/O buffers cannot be reset, therefore we
+		 * need no serialization with the transfer code here.
+		 */
+		mutex_lock(&slave->ctl_lock);
+		ret = master->ops->set_iobufs(slave, &iobufs);
+		mutex_unlock(&slave->ctl_lock);
+		if (ret == 0)
+			ret = rtdm_safe_copy_to_user(fd, arg,
+					     &iobufs, sizeof(iobufs));
+		break;
+	default:
+		ret = -EINVAL;
+	}
+
+	return ret;
+}
+
+static ssize_t spi_master_read_rt(struct rtdm_fd *fd,
+				  void __user *u_buf, size_t len)
+{
+	struct rtdm_spi_remote_slave *slave = fd_to_slave(fd);
+	struct rtdm_spi_master *master = slave->master;
+	void *rx;
+	int ret;
+
+	if (len == 0)
+		return 0;
+
+	rx = xnmalloc(len);
+	if (rx == NULL)
+		return -ENOMEM;
+
+	rtdm_mutex_lock(&master->bus_lock);
+	ret = do_chip_select(slave);
+	if (ret == 0) {
+		ret = master->ops->read(slave, rx, len);
+		do_chip_deselect(slave);
+	}
+	rtdm_mutex_unlock(&master->bus_lock);
+	if (ret > 0)
+		ret = rtdm_safe_copy_to_user(fd, u_buf, rx, ret);
+	
+	xnfree(rx);
+	
+	return ret;
+}
+
+static ssize_t spi_master_write_rt(struct rtdm_fd *fd,
+				   const void __user *u_buf, size_t len)
+{
+	struct rtdm_spi_remote_slave *slave = fd_to_slave(fd);
+	struct rtdm_spi_master *master = slave->master;
+	void *tx;
+	int ret;
+
+	if (len == 0)
+		return 0;
+
+	tx = xnmalloc(len);
+	if (tx == NULL)
+		return -ENOMEM;
+
+	ret = rtdm_safe_copy_from_user(fd, tx, u_buf, len);
+	if (ret == 0) {
+		rtdm_mutex_lock(&master->bus_lock);
+		ret = do_chip_select(slave);
+		if (ret == 0) {
+			ret = master->ops->write(slave, tx, len);
+			do_chip_deselect(slave);
+		}
+		rtdm_mutex_unlock(&master->bus_lock);
+	}
+	
+	xnfree(tx);
+
+	return ret;
+}
+
+static void iobufs_vmopen(struct vm_area_struct *vma)
+{
+	struct rtdm_spi_remote_slave *slave = vma->vm_private_data;
+
+	atomic_inc(&slave->mmap_refs);
+	dev_dbg(slave_to_kdev(slave), "mapping added\n");
+}
+
+static void iobufs_vmclose(struct vm_area_struct *vma)
+{
+	struct rtdm_spi_remote_slave *slave = vma->vm_private_data;
+
+	if (atomic_dec_and_test(&slave->mmap_refs)) {
+		slave->master->ops->mmap_release(slave);
+		dev_dbg(slave_to_kdev(slave), "mapping released\n");
+	}
+}
+
+static struct vm_operations_struct iobufs_vmops = {
+	.open = iobufs_vmopen,
+	.close = iobufs_vmclose,
+};
+
+static int spi_master_mmap(struct rtdm_fd *fd, struct vm_area_struct *vma)
+{
+	struct rtdm_spi_remote_slave *slave = fd_to_slave(fd);
+	int ret;
+
+	if (slave->master->ops->mmap_iobufs == NULL)
+		return -EINVAL;
+
+	ret = slave->master->ops->mmap_iobufs(slave, vma);
+	if (ret)
+		return ret;
+
+	dev_dbg(slave_to_kdev(slave), "mapping created\n");
+	atomic_inc(&slave->mmap_refs);
+
+	if (slave->master->ops->mmap_release) {
+		vma->vm_ops = &iobufs_vmops;
+		vma->vm_private_data = slave;
+	}
+
+	return 0;
+}
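+
+/*
+ * User-space flow sketch (editorial note, based on the UAPI in
+ * <rtdm/uapi/spi.h> and the iobufs layout set up by the controller
+ * drivers in this patch): the I/O area is configured from non-RT
+ * context, mapped once, then transfers are triggered from RT context.
+ * "fd", "tx_data" and "rx_data" are hypothetical user variables:
+ *
+ *	struct rtdm_spi_iobufs io = { .io_len = 64 };
+ *	ioctl(fd, SPI_RTIOC_SET_IOBUFS, &io);
+ *	p = mmap(NULL, io.map_len, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
+ *	memcpy(p + io.o_offset, tx_data, 64);	(fill the TX half)
+ *	ioctl(fd, SPI_RTIOC_TRANSFER);
+ *	memcpy(rx_data, p + io.i_offset, 64);	(read back the RX half)
+ */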
+
+static char *spi_slave_devnode(struct device *dev, umode_t *mode)
+{
+	return kasprintf(GFP_KERNEL, "rtdm/%s/%s",
+			 dev->class->name,
+			 dev_name(dev));
+}
+
+struct rtdm_spi_master *
+__rtdm_spi_alloc_master(struct device *dev, size_t size, int off)
+{
+	struct rtdm_spi_master *master;
+	struct spi_master *kmaster;
+
+	kmaster = spi_alloc_master(dev, size);
+	if (kmaster == NULL)
+		return NULL;
+	
+	master = (void *)(kmaster + 1) + off;
+	master->kmaster = kmaster;
+	spi_master_set_devdata(kmaster, master);
+
+	return master;
+}
+EXPORT_SYMBOL_GPL(__rtdm_spi_alloc_master);
+
+int __rtdm_spi_setup_driver(struct rtdm_spi_master *master)
+{
+	master->classname = kstrdup(
+		dev_name(&master->kmaster->dev), GFP_KERNEL);
+	if (master->classname == NULL)
+		return -ENOMEM;
+
+	master->devclass = class_create(THIS_MODULE,
+		master->classname);
+	if (IS_ERR(master->devclass)) {
+		kfree(master->classname);
+		printk(XENO_ERR "cannot create sysfs class\n");
+		return PTR_ERR(master->devclass);
+	}
+
+	master->devclass->devnode = spi_slave_devnode;
+	master->cs = NULL;
+
+	master->driver.profile_info = (struct rtdm_profile_info)
+		RTDM_PROFILE_INFO(rtdm_spi_master,
+				  RTDM_CLASS_SPI,
+				  master->subclass,
+				  0);
+	master->driver.device_flags = RTDM_NAMED_DEVICE;
+	master->driver.base_minor = 0;
+	master->driver.device_count = 256;
+	master->driver.context_size = 0;
+	master->driver.ops = (struct rtdm_fd_ops){
+		.open		=	spi_master_open,
+		.close		=	spi_master_close,
+		.read_rt	=	spi_master_read_rt,
+		.write_rt	=	spi_master_write_rt,
+		.ioctl_rt	=	spi_master_ioctl_rt,
+		.ioctl_nrt	=	spi_master_ioctl_nrt,
+		.mmap		=	spi_master_mmap,
+	};
+	
+	rtdm_drv_set_sysclass(&master->driver, master->devclass);
+
+	INIT_LIST_HEAD(&master->slaves);
+	rtdm_lock_init(&master->lock);
+	rtdm_mutex_init(&master->bus_lock);
+
+	return 0;
+}
+
+static int spi_transfer_one_unimp(struct spi_master *master,
+				  struct spi_device *spi,
+				  struct spi_transfer *tfr)
+{
+	return -ENODEV;
+}
+
+int rtdm_spi_add_master(struct rtdm_spi_master *master)
+{
+	struct spi_master *kmaster = master->kmaster;
+
+	/*
+	 * Prevent the transfer handler from being called by the
+	 * regular SPI stack, just in case.
+	 */
+	kmaster->transfer_one = spi_transfer_one_unimp;
+	master->devclass = NULL;
+
+	/*
+	 * Register the controller with the SPI core; devices on the
+	 * bus will then be enumerated and handed to spi_device_probe().
+	 */
+	return spi_register_master(kmaster);
+}
+EXPORT_SYMBOL_GPL(rtdm_spi_add_master);
+
+void rtdm_spi_remove_master(struct rtdm_spi_master *master)
+{
+	struct class *class = master->devclass;
+	char *classname = master->classname;
+	
+	rtdm_mutex_destroy(&master->bus_lock);
+	spi_unregister_master(master->kmaster);
+	rtdm_drv_set_sysclass(&master->driver, NULL);
+	class_destroy(class);
+	kfree(classname);
+}
+EXPORT_SYMBOL_GPL(rtdm_spi_remove_master);
+
+MODULE_LICENSE("GPL");
+++ linux-patched/drivers/xenomai/spi/spi-sun6i.c	2022-03-21 12:58:31.436869000 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/drivers/xenomai/spi/spi-bcm2835.c	1970-01-01 01:00:00.000000000 +0100
+/**
+ * I/O handling lifted from drivers/spi/spi-sun6i.c:
+ * Copyright (C) 2012 - 2014 Allwinner Tech
+ * Pan Nan <pannan@allwinnertech.com>
+ * Copyright (C) 2014 Maxime Ripard
+ * Maxime Ripard <maxime.ripard@free-electrons.com>
+ *
+ * RTDM integration by:
+ * Copyright (C) 2017 Philippe Gerum <rpm@xenomai.org>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/err.h>
+#include <linux/dma-mapping.h>
+#include <linux/interrupt.h>
+#include <linux/platform_device.h>
+#include <linux/io.h>
+#include <linux/clk.h>
+#include <linux/spi/spi.h>
+#include <linux/of_irq.h>
+#include <linux/of_device.h>
+#include <linux/reset.h>
+#include "spi-master.h"
+
+#define RTDM_SUBCLASS_SUN6I  2
+
+#define SUN6I_GBL_CTL_REG		0x04
+#define SUN6I_GBL_CTL_BUS_ENABLE	BIT(0)
+#define SUN6I_GBL_CTL_MASTER		BIT(1)
+#define SUN6I_GBL_CTL_TP		BIT(7)
+#define SUN6I_GBL_CTL_RST		BIT(31)
+
+#define SUN6I_TFR_CTL_REG		0x08
+#define SUN6I_TFR_CTL_CPHA		BIT(0)
+#define SUN6I_TFR_CTL_CPOL		BIT(1)
+#define SUN6I_TFR_CTL_SPOL		BIT(2)
+#define SUN6I_TFR_CTL_CS_MASK		0x30
+#define SUN6I_TFR_CTL_CS(cs)		(((cs) << 4) & SUN6I_TFR_CTL_CS_MASK)
+#define SUN6I_TFR_CTL_CS_MANUAL		BIT(6)
+#define SUN6I_TFR_CTL_CS_LEVEL		BIT(7)
+#define SUN6I_TFR_CTL_DHB		BIT(8)
+#define SUN6I_TFR_CTL_FBS		BIT(12)
+#define SUN6I_TFR_CTL_XCH		BIT(31)
+
+#define SUN6I_INT_CTL_REG		0x10
+#define SUN6I_INT_CTL_RX_RDY		BIT(0)
+#define SUN6I_INT_CTL_TX_RDY		BIT(4)
+#define SUN6I_INT_CTL_RX_OVF		BIT(8)
+#define SUN6I_INT_CTL_TC		BIT(12)
+
+#define SUN6I_INT_STA_REG		0x14
+
+#define SUN6I_FIFO_CTL_REG		0x18
+#define SUN6I_FIFO_CTL_RX_RDY_TRIG_LEVEL_MASK	0xff
+#define SUN6I_FIFO_CTL_RX_RDY_TRIG_LEVEL_BITS	0
+#define SUN6I_FIFO_CTL_RX_RST			BIT(15)
+#define SUN6I_FIFO_CTL_TX_RDY_TRIG_LEVEL_MASK	0xff
+#define SUN6I_FIFO_CTL_TX_RDY_TRIG_LEVEL_BITS	16
+#define SUN6I_FIFO_CTL_TX_RST			BIT(31)
+
+#define SUN6I_FIFO_STA_REG		0x1c
+#define SUN6I_FIFO_STA_RX_CNT(reg)	(((reg) >> 0) & 0xff)
+#define SUN6I_FIFO_STA_TX_CNT(reg)	(((reg) >> 16) & 0xff)
+
+#define SUN6I_CLK_CTL_REG		0x24
+#define SUN6I_CLK_CTL_CDR2_MASK		0xff
+#define SUN6I_CLK_CTL_CDR2(div)		(((div) & SUN6I_CLK_CTL_CDR2_MASK) << 0)
+#define SUN6I_CLK_CTL_CDR1_MASK		0xf
+#define SUN6I_CLK_CTL_CDR1(div)		(((div) & SUN6I_CLK_CTL_CDR1_MASK) << 8)
+#define SUN6I_CLK_CTL_DRS		BIT(12)
+
+#define SUN6I_MAX_XFER_SIZE		0xffffff
+
+#define SUN6I_BURST_CNT_REG		0x30
+#define SUN6I_BURST_CNT(cnt)		((cnt) & SUN6I_MAX_XFER_SIZE)
+
+#define SUN6I_XMIT_CNT_REG		0x34
+#define SUN6I_XMIT_CNT(cnt)		((cnt) & SUN6I_MAX_XFER_SIZE)
+
+#define SUN6I_BURST_CTL_CNT_REG		0x38
+#define SUN6I_BURST_CTL_CNT_STC(cnt)	((cnt) & SUN6I_MAX_XFER_SIZE)
+
+#define SUN6I_TXDATA_REG		0x200
+#define SUN6I_RXDATA_REG		0x300
+
+#define SUN6I_SPI_MODE_BITS	(SPI_CPOL | SPI_CPHA | SPI_CS_HIGH	\
+				 | SPI_LSB_FIRST)
+
+struct spi_setup_data {
+	int fifo_depth;
+};
+
+static struct spi_setup_data sun6i_data = {
+	.fifo_depth = 128,
+};
+
+static struct spi_setup_data sun8i_data = {
+	.fifo_depth = 64,
+};
+
+struct spi_master_sun6i {
+	struct rtdm_spi_master master;
+	void __iomem *regs;
+	struct reset_control *rstc;
+	struct clk *hclk;
+	struct clk *mclk;
+	unsigned long clk_hz;
+	rtdm_irq_t irqh;
+	const u8 *tx_buf;
+	u8 *rx_buf;
+	int tx_len;
+	int rx_len;
+	rtdm_event_t transfer_done;
+	const struct spi_setup_data *setup;
+};
+
+struct spi_slave_sun6i {
+	struct rtdm_spi_remote_slave slave;
+	void *io_virt;
+	dma_addr_t io_dma;
+	size_t io_len;
+};
+
+static inline struct spi_slave_sun6i *
+to_slave_sun6i(struct rtdm_spi_remote_slave *slave)
+{
+	return container_of(slave, struct spi_slave_sun6i, slave);
+}
+
+static inline struct spi_master_sun6i *
+to_master_sun6i(struct rtdm_spi_remote_slave *slave)
+{
+	return container_of(slave->master, struct spi_master_sun6i, master);
+}
+
+static inline struct device *
+master_to_kdev(struct rtdm_spi_master *master)
+{
+	return &master->kmaster->dev;
+}
+
+static inline u32 sun6i_rd(struct spi_master_sun6i *spim,
+			   unsigned int reg)
+{
+	return readl(spim->regs + reg);
+}
+
+static inline void sun6i_wr(struct spi_master_sun6i *spim,
+			    unsigned int reg, u32 val)
+{
+	writel(val, spim->regs + reg);
+}
+
+static void sun6i_rd_fifo(struct spi_master_sun6i *spim)
+{
+	u32 reg;
+	int len;
+	u8 byte;
+
+	reg = sun6i_rd(spim, SUN6I_FIFO_STA_REG);
+	len = min((int)SUN6I_FIFO_STA_RX_CNT(reg), spim->rx_len);
+
+	while (len-- > 0) {
+		byte = sun6i_rd(spim, SUN6I_RXDATA_REG);
+		if (spim->rx_buf)
+			*spim->rx_buf++ = byte;
+		spim->rx_len--;
+	}
+}
+
+static void sun6i_wr_fifo(struct spi_master_sun6i *spim)
+{
+	u32 reg;
+	int len;
+	u8 byte;
+
+	reg = sun6i_rd(spim, SUN6I_FIFO_STA_REG);
+	len = min(spim->setup->fifo_depth - (int)SUN6I_FIFO_STA_TX_CNT(reg),
+		  spim->tx_len);
+	
+	while (len-- > 0) {
+		byte = spim->tx_buf ? *spim->tx_buf++ : 0;
+		sun6i_wr(spim, SUN6I_TXDATA_REG, byte);
+		spim->tx_len--;
+	}
+}
+
+static int sun6i_spi_interrupt(rtdm_irq_t *irqh)
+{
+	struct spi_master_sun6i *spim;
+	u32 status;
+
+	spim = rtdm_irq_get_arg(irqh, struct spi_master_sun6i);
+
+	sun6i_rd_fifo(spim);
+	sun6i_wr_fifo(spim);
+	
+	status = sun6i_rd(spim, SUN6I_INT_STA_REG);
+	if ((status & SUN6I_INT_CTL_TC)) {
+		sun6i_wr(spim, SUN6I_INT_STA_REG, SUN6I_INT_CTL_TC);
+		sun6i_wr(spim, SUN6I_INT_CTL_REG, 0);
+		rtdm_event_signal(&spim->transfer_done);
+	} else if (status & SUN6I_INT_CTL_TX_RDY)
+		sun6i_wr(spim, SUN6I_INT_STA_REG, SUN6I_INT_CTL_TX_RDY);
+
+	return RTDM_IRQ_HANDLED;
+}
+
+static int sun6i_configure(struct rtdm_spi_remote_slave *slave)
+{
+	struct spi_master_sun6i *spim = to_master_sun6i(slave);
+	struct rtdm_spi_config *config = &slave->config;
+	u32 reg, div;
+	
+	/* Set clock polarity and phase. */
+
+	reg = sun6i_rd(spim, SUN6I_TFR_CTL_REG);
+	reg &= ~(SUN6I_TFR_CTL_CPOL | SUN6I_TFR_CTL_CPHA |
+		 SUN6I_TFR_CTL_FBS | SUN6I_TFR_CTL_SPOL);
+
+	/* Manual CS via ->chip_select(). */
+	reg |= SUN6I_TFR_CTL_CS_MANUAL;
+
+	if (config->mode & SPI_CPOL)
+		reg |= SUN6I_TFR_CTL_CPOL;
+
+	if (config->mode & SPI_CPHA)
+		reg |= SUN6I_TFR_CTL_CPHA;
+
+	if (config->mode & SPI_LSB_FIRST)
+		reg |= SUN6I_TFR_CTL_FBS;
+
+	if (!(config->mode & SPI_CS_HIGH))
+		reg |= SUN6I_TFR_CTL_SPOL;
+
+	sun6i_wr(spim, SUN6I_TFR_CTL_REG, reg);
+	
+	/* Setup clock divider. */
+
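+	/*
+	 * Assuming the usual sun6i divider semantics (as mirrored in
+	 * the mainline spi-sun6i driver): CDR2 divides the module
+	 * clock by 2 * (n + 1) and is selected by the DRS bit, while
+	 * CDR1 divides it by 2^n. E.g. with clk_hz = 100 MHz and
+	 * speed_hz = 25 MHz, div = 2, so CDR2 is programmed with 1,
+	 * giving 100 MHz / (2 * (1 + 1)) = 25 MHz.
+	 */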
+	div = spim->clk_hz / (2 * config->speed_hz);
+	if (div <= SUN6I_CLK_CTL_CDR2_MASK + 1) {
+		if (div > 0)
+			div--;
+		reg = SUN6I_CLK_CTL_CDR2(div) | SUN6I_CLK_CTL_DRS;
+	} else {
+		div = ilog2(spim->clk_hz) - ilog2(config->speed_hz);
+		reg = SUN6I_CLK_CTL_CDR1(div);
+	}
+
+	sun6i_wr(spim, SUN6I_CLK_CTL_REG, reg);
+
+	return 0;
+}
+
+static void sun6i_chip_select(struct rtdm_spi_remote_slave *slave,
+			      bool active)
+{
+	struct spi_master_sun6i *spim = to_master_sun6i(slave);
+	u32 reg;
+
+	/*
+	 * We have no cs_gpios, so this handler will be called for
+	 * each transfer.
+	 */
+	reg = sun6i_rd(spim, SUN6I_TFR_CTL_REG);
+	reg &= ~(SUN6I_TFR_CTL_CS_MASK | SUN6I_TFR_CTL_CS_LEVEL);
+	reg |= SUN6I_TFR_CTL_CS(slave->chip_select);
+
+	if (active)
+		reg |= SUN6I_TFR_CTL_CS_LEVEL;
+
+	sun6i_wr(spim, SUN6I_TFR_CTL_REG, reg);
+}
+
+static int do_transfer_irq(struct rtdm_spi_remote_slave *slave)
+{
+	struct spi_master_sun6i *spim = to_master_sun6i(slave);
+	u32 tx_len = 0, reg;
+	int ret;
+
+	/* Reset FIFO. */
+	sun6i_wr(spim, SUN6I_FIFO_CTL_REG,
+		 SUN6I_FIFO_CTL_RX_RST | SUN6I_FIFO_CTL_TX_RST);
+
+	/* Set FIFO interrupt trigger level to 3/4 of the fifo depth. */
+	reg = spim->setup->fifo_depth / 4 * 3;
+	sun6i_wr(spim, SUN6I_FIFO_CTL_REG,
+		 (reg << SUN6I_FIFO_CTL_RX_RDY_TRIG_LEVEL_BITS) |
+		 (reg << SUN6I_FIFO_CTL_TX_RDY_TRIG_LEVEL_BITS));
+
+	reg = sun6i_rd(spim, SUN6I_TFR_CTL_REG);
+	reg &= ~SUN6I_TFR_CTL_DHB;
+	/* Discard unused SPI bursts if TX only. */
+	if (spim->rx_buf == NULL)
+		reg |= SUN6I_TFR_CTL_DHB;
+	sun6i_wr(spim, SUN6I_TFR_CTL_REG, reg);
+
+	if (spim->tx_buf)
+		tx_len = spim->tx_len;
+
+	/* Setup the counters. */
+	sun6i_wr(spim, SUN6I_BURST_CNT_REG, SUN6I_BURST_CNT(spim->tx_len));
+	sun6i_wr(spim, SUN6I_XMIT_CNT_REG, SUN6I_XMIT_CNT(tx_len));
+	sun6i_wr(spim, SUN6I_BURST_CTL_CNT_REG,
+		 SUN6I_BURST_CTL_CNT_STC(tx_len));
+
+	/* Fill the TX FIFO */
+	sun6i_wr_fifo(spim);
+
+	/* Enable interrupts. */
+	reg = sun6i_rd(spim, SUN6I_INT_CTL_REG);
+	reg |= SUN6I_INT_CTL_TC | SUN6I_INT_CTL_TX_RDY;
+	sun6i_wr(spim, SUN6I_INT_CTL_REG, reg);
+
+	/* Start the transfer. */
+	reg = sun6i_rd(spim, SUN6I_TFR_CTL_REG);
+	sun6i_wr(spim, SUN6I_TFR_CTL_REG, reg | SUN6I_TFR_CTL_XCH);
+	
+	ret = rtdm_event_wait(&spim->transfer_done);
+	if (ret) {
+		sun6i_wr(spim, SUN6I_INT_CTL_REG, 0);
+		return ret;
+	}
+
+	return 0;
+}
+
+static int sun6i_transfer_iobufs(struct rtdm_spi_remote_slave *slave)
+{
+	struct spi_master_sun6i *spim = to_master_sun6i(slave);
+	struct spi_slave_sun6i *sun6i = to_slave_sun6i(slave);
+
+	if (sun6i->io_len == 0)
+		return -EINVAL;	/* No I/O buffers set. */
+	
+	spim->tx_len = sun6i->io_len / 2;
+	spim->rx_len = spim->tx_len;
+	spim->tx_buf = sun6i->io_virt + spim->rx_len;
+	spim->rx_buf = sun6i->io_virt;
+
+	return do_transfer_irq(slave);
+}
+
+static int sun6i_transfer_iobufs_n(struct rtdm_spi_remote_slave *slave,
+				   int len)
+{
+	struct spi_master_sun6i *spim = to_master_sun6i(slave);
+	struct spi_slave_sun6i *sun6i = to_slave_sun6i(slave);
+
+	if ((sun6i->io_len == 0) ||
+		(len <= 0) || (len > (sun6i->io_len / 2)))
+		return -EINVAL;
+
+	spim->tx_len = len;
+	spim->rx_len = len;
+	spim->tx_buf = sun6i->io_virt + sun6i->io_len / 2;
+	spim->rx_buf = sun6i->io_virt;
+
+	return do_transfer_irq(slave);
+}
+
+static ssize_t sun6i_read(struct rtdm_spi_remote_slave *slave,
+			  void *rx, size_t len)
+{
+	struct spi_master_sun6i *spim = to_master_sun6i(slave);
+
+	spim->tx_len = len;
+	spim->rx_len = len;
+	spim->tx_buf = NULL;
+	spim->rx_buf = rx;
+
+	return do_transfer_irq(slave) ?: len;
+}
+
+static ssize_t sun6i_write(struct rtdm_spi_remote_slave *slave,
+			   const void *tx, size_t len)
+{
+	struct spi_master_sun6i *spim = to_master_sun6i(slave);
+
+	spim->tx_len = len;
+	spim->rx_len = len;
+	spim->tx_buf = tx;
+	spim->rx_buf = NULL;
+
+	return do_transfer_irq(slave) ?: len;
+}
+
+static int set_iobufs(struct spi_slave_sun6i *sun6i, size_t len)
+{
+	dma_addr_t dma;
+	void *p;
+
+	if (len == 0)
+		return -EINVAL;
+	
+	len = L1_CACHE_ALIGN(len) * 2;
+	if (len == sun6i->io_len)
+		return 0;
+
+	if (sun6i->io_len)
+		return -EINVAL;	/* I/O buffers may not be resized. */
+
+	p = dma_alloc_coherent(NULL, len, &dma, GFP_KERNEL);
+	if (p == NULL)
+		return -ENOMEM;
+
+	sun6i->io_dma = dma;
+	sun6i->io_virt = p;
+	smp_mb();
+	sun6i->io_len = len;
+	
+	return 0;
+}
+
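+/*
+ * The single coherent area set up by set_iobufs() is split in two
+ * halves: the input (RX) half starts at offset 0 and the output (TX)
+ * half at io_len / 2, matching the i_offset/o_offset values reported
+ * to userland here and the buffer pointers set up in
+ * sun6i_transfer_iobufs().
+ */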
+static int sun6i_set_iobufs(struct rtdm_spi_remote_slave *slave,
+			    struct rtdm_spi_iobufs *p)
+{
+	struct spi_slave_sun6i *sun6i = to_slave_sun6i(slave);
+	int ret;
+
+	ret = set_iobufs(sun6i, p->io_len);
+	if (ret)
+		return ret;
+
+	p->i_offset = 0;
+	p->o_offset = sun6i->io_len / 2;
+	p->map_len = sun6i->io_len;
+	
+	return 0;
+}
+
+static int sun6i_mmap_iobufs(struct rtdm_spi_remote_slave *slave,
+			     struct vm_area_struct *vma)
+{
+	struct spi_slave_sun6i *sun6i = to_slave_sun6i(slave);
+
+	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
+
+	return rtdm_mmap_kmem(vma, sun6i->io_virt);
+}
+
+static void sun6i_mmap_release(struct rtdm_spi_remote_slave *slave)
+{
+	struct spi_slave_sun6i *sun6i = to_slave_sun6i(slave);
+
+	dma_free_coherent(NULL, sun6i->io_len,
+			  sun6i->io_virt, sun6i->io_dma);
+	sun6i->io_len = 0;
+}
+
+static struct rtdm_spi_remote_slave *
+sun6i_attach_slave(struct rtdm_spi_master *master, struct spi_device *spi)
+{
+	struct spi_slave_sun6i *sun6i;
+	int ret;
+
+	sun6i = kzalloc(sizeof(*sun6i), GFP_KERNEL);
+	if (sun6i == NULL)
+		return ERR_PTR(-ENOMEM);
+
+	ret = rtdm_spi_add_remote_slave(&sun6i->slave, master, spi);
+	if (ret) {
+		dev_err(&spi->dev,
+			"%s: failed to attach slave\n", __func__);
+		kfree(sun6i);
+		return ERR_PTR(ret);
+	}
+
+	return &sun6i->slave;
+}
+
+static void sun6i_detach_slave(struct rtdm_spi_remote_slave *slave)
+{
+	struct spi_slave_sun6i *sun6i = to_slave_sun6i(slave);
+
+	rtdm_spi_remove_remote_slave(slave);
+	kfree(sun6i);
+}
+
+static struct rtdm_spi_master_ops sun6i_master_ops = {
+	.configure = sun6i_configure,
+	.chip_select = sun6i_chip_select,
+	.set_iobufs = sun6i_set_iobufs,
+	.mmap_iobufs = sun6i_mmap_iobufs,
+	.mmap_release = sun6i_mmap_release,
+	.transfer_iobufs = sun6i_transfer_iobufs,
+	.transfer_iobufs_n = sun6i_transfer_iobufs_n,
+	.write = sun6i_write,
+	.read = sun6i_read,
+	.attach_slave = sun6i_attach_slave,
+	.detach_slave = sun6i_detach_slave,
+};
+
+static int sun6i_spi_probe(struct platform_device *pdev)
+{
+	struct rtdm_spi_master *master;
+	struct spi_master_sun6i *spim;
+	struct spi_master *kmaster;
+	struct resource *r;
+	int ret, irq;
+	u32 clk_rate;
+
+	dev_dbg(&pdev->dev, "%s: entered\n", __func__);
+
+	master = rtdm_spi_alloc_master(&pdev->dev,
+				       struct spi_master_sun6i, master);
+	if (master == NULL)
+		return -ENOMEM;
+
+	master->subclass = RTDM_SUBCLASS_SUN6I;
+	master->ops = &sun6i_master_ops;
+	platform_set_drvdata(pdev, master);
+
+	kmaster = master->kmaster;
+	kmaster->max_speed_hz = 100 * 1000 * 1000;
+	kmaster->min_speed_hz = 3 * 1000;
+	kmaster->mode_bits = SUN6I_SPI_MODE_BITS;
+	kmaster->bits_per_word_mask = SPI_BPW_MASK(8);
+	kmaster->num_chipselect = 4;
+	kmaster->dev.of_node = pdev->dev.of_node;
+
+	spim = container_of(master, struct spi_master_sun6i, master);
+	spim->setup = of_device_get_match_data(&pdev->dev);
+
+	rtdm_event_init(&spim->transfer_done, 0);
+
+	r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	spim->regs = devm_ioremap_resource(&pdev->dev, r);
+	if (IS_ERR(spim->regs)) {
+		dev_err(&pdev->dev, "%s: cannot map I/O memory\n", __func__);
+		ret = PTR_ERR(spim->regs);
+		goto fail;
+	}
+	
+	spim->hclk = devm_clk_get(&pdev->dev, "ahb");
+	if (IS_ERR(spim->hclk)) {
+		dev_err(&pdev->dev, "Unable to acquire AHB clock\n");
+		ret = PTR_ERR(spim->hclk);
+		goto fail;
+	}
+
+	spim->mclk = devm_clk_get(&pdev->dev, "mod");
+	if (IS_ERR(spim->mclk)) {
+		dev_err(&pdev->dev, "Unable to acquire MOD clock\n");
+		ret = PTR_ERR(spim->mclk);
+		goto fail;
+	}
+
+	spim->rstc = devm_reset_control_get(&pdev->dev, NULL);
+	if (IS_ERR(spim->rstc)) {
+		dev_err(&pdev->dev, "Couldn't get reset controller\n");
+		ret = PTR_ERR(spim->rstc);
+		goto fail;
+	}
+
+	/*
+	 * Ensure that we have a parent clock fast enough to handle
+	 * the fastest transfers properly.
+	 */
+	clk_rate = clk_get_rate(spim->mclk);
+	if (clk_rate < 2 * kmaster->max_speed_hz)
+		clk_set_rate(spim->mclk, 2 * kmaster->max_speed_hz);
+
+	spim->clk_hz = clk_get_rate(spim->mclk);
+
+	irq = irq_of_parse_and_map(pdev->dev.of_node, 0);
+	if (irq <= 0) {
+		ret = irq ?: -ENODEV;
+		goto fail;
+	}
+
+	clk_prepare_enable(spim->hclk);
+	clk_prepare_enable(spim->mclk);
+
+	ret = reset_control_deassert(spim->rstc);
+	if (ret)
+		goto fail_unclk;
+
+	/* Enable SPI module, in master mode with smart burst. */
+
+	sun6i_wr(spim, SUN6I_GBL_CTL_REG,
+		 SUN6I_GBL_CTL_BUS_ENABLE | SUN6I_GBL_CTL_MASTER |
+		 SUN6I_GBL_CTL_TP);
+
+	/* Disable and clear all interrupts. */
+	sun6i_wr(spim, SUN6I_INT_CTL_REG, 0);
+	sun6i_wr(spim, SUN6I_INT_STA_REG, ~0);
+	
+	ret = rtdm_irq_request(&spim->irqh, irq,
+			       sun6i_spi_interrupt, 0,
+			       dev_name(&pdev->dev), spim);
+	if (ret) {
+		dev_err(&pdev->dev, "%s: cannot request IRQ%d\n",
+			__func__, irq);
+		goto fail_unclk;
+	}
+
+	ret = rtdm_spi_add_master(&spim->master);
+	if (ret) {
+		dev_err(&pdev->dev, "%s: failed to add master\n",
+			__func__);
+		goto fail_register;
+	}
+
+	return 0;
+
+fail_register:
+	rtdm_irq_free(&spim->irqh);
+fail_unclk:
+	clk_disable_unprepare(spim->mclk);
+	clk_disable_unprepare(spim->hclk);
+fail:
+	spi_master_put(kmaster);
+
+	return ret;
+}
+
+static int sun6i_spi_remove(struct platform_device *pdev)
+{
+	struct rtdm_spi_master *master = platform_get_drvdata(pdev);
+	struct spi_master_sun6i *spim;
+
+	dev_dbg(&pdev->dev, "%s: entered\n", __func__);
+
+	spim = container_of(master, struct spi_master_sun6i, master);
+
+	rtdm_irq_free(&spim->irqh);
+
+	clk_disable_unprepare(spim->mclk);
+	clk_disable_unprepare(spim->hclk);
+
+	rtdm_spi_remove_master(master);
+
+	return 0;
+}
+
+static const struct of_device_id sun6i_spi_match[] = {
+	{
+		.compatible = "allwinner,sun6i-a31-spi",
+		.data = &sun6i_data,
+	},
+	{
+		.compatible = "allwinner,sun8i-h3-spi",
+		.data = &sun8i_data,
+	},
+	{ /* Sentinel */ },
+};
+MODULE_DEVICE_TABLE(of, sun6i_spi_match);
+
+static struct platform_driver sun6i_spi_driver = {
+	.driver		= {
+		.name		= "spi-sun6i",
+		.of_match_table	= sun6i_spi_match,
+	},
+	.probe		= sun6i_spi_probe,
+	.remove		= sun6i_spi_remove,
+};
+module_platform_driver(sun6i_spi_driver);
+
+MODULE_LICENSE("GPL");
+++ linux-patched/drivers/xenomai/spi/spi-bcm2835.c	2022-03-21 12:58:31.428869078 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/drivers/xenomai/Kconfig	1970-01-01 01:00:00.000000000 +0100
+/**
+ * I/O handling lifted from drivers/spi/spi-bcm2835.c:
+ * Copyright (C) 2012 Chris Boot
+ * Copyright (C) 2013 Stephen Warren
+ * Copyright (C) 2015 Martin Sperl
+ *
+ * RTDM integration by:
+ * Copyright (C) 2016 Philippe Gerum <rpm@xenomai.org>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/err.h>
+#include <linux/dma-mapping.h>
+#include <linux/interrupt.h>
+#include <linux/platform_device.h>
+#include <linux/io.h>
+#include <linux/clk.h>
+#include <linux/spi/spi.h>
+#include <linux/of_irq.h>
+#include <linux/of_gpio.h>
+#include "spi-master.h"
+
+#define RTDM_SUBCLASS_BCM2835  1
+
+/* SPI register offsets */
+#define BCM2835_SPI_CS			0x00
+#define BCM2835_SPI_FIFO		0x04
+#define BCM2835_SPI_CLK			0x08
+#define BCM2835_SPI_DLEN		0x0c
+#define BCM2835_SPI_LTOH		0x10
+#define BCM2835_SPI_DC			0x14
+
+/* Bitfields in CS */
+#define BCM2835_SPI_CS_LEN_LONG		0x02000000
+#define BCM2835_SPI_CS_DMA_LEN		0x01000000
+#define BCM2835_SPI_CS_CSPOL2		0x00800000
+#define BCM2835_SPI_CS_CSPOL1		0x00400000
+#define BCM2835_SPI_CS_CSPOL0		0x00200000
+#define BCM2835_SPI_CS_RXF		0x00100000
+#define BCM2835_SPI_CS_RXR		0x00080000
+#define BCM2835_SPI_CS_TXD		0x00040000
+#define BCM2835_SPI_CS_RXD		0x00020000
+#define BCM2835_SPI_CS_DONE		0x00010000
+#define BCM2835_SPI_CS_LEN		0x00002000
+#define BCM2835_SPI_CS_REN		0x00001000
+#define BCM2835_SPI_CS_ADCS		0x00000800
+#define BCM2835_SPI_CS_INTR		0x00000400
+#define BCM2835_SPI_CS_INTD		0x00000200
+#define BCM2835_SPI_CS_DMAEN		0x00000100
+#define BCM2835_SPI_CS_TA		0x00000080
+#define BCM2835_SPI_CS_CSPOL		0x00000040
+#define BCM2835_SPI_CS_CLEAR_RX		0x00000020
+#define BCM2835_SPI_CS_CLEAR_TX		0x00000010
+#define BCM2835_SPI_CS_CPOL		0x00000008
+#define BCM2835_SPI_CS_CPHA		0x00000004
+#define BCM2835_SPI_CS_CS_10		0x00000002
+#define BCM2835_SPI_CS_CS_01		0x00000001
+
+#define BCM2835_SPI_POLLING_LIMIT_US	30
+#define BCM2835_SPI_POLLING_JIFFIES	2
+#define BCM2835_SPI_DMA_MIN_LENGTH	96
+#define BCM2835_SPI_MODE_BITS	(SPI_CPOL | SPI_CPHA | SPI_CS_HIGH \
+				| SPI_NO_CS | SPI_3WIRE)
+
+struct spi_master_bcm2835 {
+	struct rtdm_spi_master master;
+	void __iomem *regs;
+	struct clk *clk;
+	unsigned long clk_hz;
+	rtdm_irq_t irqh;
+	const u8 *tx_buf;
+	u8 *rx_buf;
+	int tx_len;
+	int rx_len;
+	rtdm_event_t transfer_done;
+};
+
+struct spi_slave_bcm2835 {
+	struct rtdm_spi_remote_slave slave;
+	void *io_virt;
+	dma_addr_t io_dma;
+	size_t io_len;
+};
+
+static inline struct spi_slave_bcm2835 *
+to_slave_bcm2835(struct rtdm_spi_remote_slave *slave)
+{
+	return container_of(slave, struct spi_slave_bcm2835, slave);
+}
+
+static inline struct spi_master_bcm2835 *
+to_master_bcm2835(struct rtdm_spi_remote_slave *slave)
+{
+	return container_of(slave->master, struct spi_master_bcm2835, master);
+}
+
+static inline struct device *
+master_to_kdev(struct rtdm_spi_master *master)
+{
+	return &master->kmaster->dev;
+}
+
+static inline u32 bcm2835_rd(struct spi_master_bcm2835 *spim,
+			     unsigned int reg)
+{
+	return readl(spim->regs + reg);
+}
+
+static inline void bcm2835_wr(struct spi_master_bcm2835 *spim,
+			      unsigned int reg, u32 val)
+{
+	writel(val, spim->regs + reg);
+}
+
+static inline void bcm2835_rd_fifo(struct spi_master_bcm2835 *spim)
+{
+	u8 byte;
+
+	while (spim->rx_len > 0 &&
+	       (bcm2835_rd(spim, BCM2835_SPI_CS) & BCM2835_SPI_CS_RXD)) {
+		byte = bcm2835_rd(spim, BCM2835_SPI_FIFO);
+		if (spim->rx_buf)
+			*spim->rx_buf++ = byte;
+		spim->rx_len--;
+	}
+}
+
+static inline void bcm2835_wr_fifo(struct spi_master_bcm2835 *spim)
+{
+	u8 byte;
+
+	while (spim->tx_len > 0 &&
+	       (bcm2835_rd(spim, BCM2835_SPI_CS) & BCM2835_SPI_CS_TXD)) {
+		byte = spim->tx_buf ? *spim->tx_buf++ : 0;
+		bcm2835_wr(spim, BCM2835_SPI_FIFO, byte);
+		spim->tx_len--;
+	}
+}
+
+static void bcm2835_reset_hw(struct spi_master_bcm2835 *spim)
+{
+	u32 cs = bcm2835_rd(spim, BCM2835_SPI_CS);
+
+	cs &= ~(BCM2835_SPI_CS_INTR |
+		BCM2835_SPI_CS_INTD |
+		BCM2835_SPI_CS_DMAEN |
+		BCM2835_SPI_CS_TA);
+	cs |= BCM2835_SPI_CS_CLEAR_RX | BCM2835_SPI_CS_CLEAR_TX;
+
+	/* Reset the SPI block. */
+	bcm2835_wr(spim, BCM2835_SPI_CS, cs);
+	bcm2835_wr(spim, BCM2835_SPI_DLEN, 0);
+}
+
+static int bcm2835_spi_interrupt(rtdm_irq_t *irqh)
+{
+	struct spi_master_bcm2835 *spim;
+
+	spim = rtdm_irq_get_arg(irqh, struct spi_master_bcm2835);
+
+	bcm2835_rd_fifo(spim);
+	bcm2835_wr_fifo(spim);
+
+	if (bcm2835_rd(spim, BCM2835_SPI_CS) & BCM2835_SPI_CS_DONE) {
+		bcm2835_reset_hw(spim);
+		rtdm_event_signal(&spim->transfer_done);
+	}
+
+	return RTDM_IRQ_HANDLED;
+}
+
+static int bcm2835_configure(struct rtdm_spi_remote_slave *slave)
+{
+	struct spi_master_bcm2835 *spim = to_master_bcm2835(slave);
+	struct rtdm_spi_config *config = &slave->config;
+	unsigned long spi_hz, cdiv;
+	u32 cs;
+
+	/* Set clock polarity and phase. */
+
+	cs = bcm2835_rd(spim, BCM2835_SPI_CS);
+
+	cs &= ~(BCM2835_SPI_CS_CPOL | BCM2835_SPI_CS_CPHA);
+	if (config->mode & SPI_CPOL)
+		cs |= BCM2835_SPI_CS_CPOL;
+	if (config->mode & SPI_CPHA)
+		cs |= BCM2835_SPI_CS_CPHA;
+
+	bcm2835_wr(spim, BCM2835_SPI_CS, cs);
+	
+	/* Set clock frequency. */
+
+	spi_hz = config->speed_hz;
+
+	/*
+	 * The fastest rate we can generate is half of the APB clock,
+	 * i.e. close to clk_hz / 2.
+	 */
+	if (spi_hz >= spim->clk_hz / 2)
+		cdiv = 2;
+	else if (spi_hz) {
+		cdiv = DIV_ROUND_UP(spim->clk_hz, spi_hz); /* Multiple of 2. */
+		cdiv += (cdiv % 2);
+		if (cdiv >= 65536)
+			cdiv = 0;
+	} else
+		cdiv = 0;
+
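+	/*
+	 * The divider is rounded up to an even value, and a CDIV of 0
+	 * selects the maximum divisor (65536) on this SPI block,
+	 * hence the clamping above.
+	 */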
+	bcm2835_wr(spim, BCM2835_SPI_CLK, cdiv);
+	
+	return 0;
+}
+
+static void bcm2835_chip_select(struct rtdm_spi_remote_slave *slave,
+				bool active)
+{
+	struct spi_master_bcm2835 *spim = to_master_bcm2835(slave);
+	struct rtdm_spi_config *config = &slave->config;
+	u32 cs;
+
+	cs = bcm2835_rd(spim, BCM2835_SPI_CS);
+
+	if (config->mode & SPI_CS_HIGH) {
+		cs |= BCM2835_SPI_CS_CSPOL;
+		cs |= BCM2835_SPI_CS_CSPOL0 << slave->chip_select;
+	} else {
+		cs &= ~BCM2835_SPI_CS_CSPOL;
+		cs &= ~(BCM2835_SPI_CS_CSPOL0 << slave->chip_select);
+	}
+
+	/* "active" is the logical state, not the impedance level. */
+
+	if (active) {
+		if (config->mode & SPI_NO_CS)
+			cs |= BCM2835_SPI_CS_CS_10 | BCM2835_SPI_CS_CS_01;
+		else {
+			cs &= ~(BCM2835_SPI_CS_CS_10 | BCM2835_SPI_CS_CS_01);
+			cs |= slave->chip_select;
+		}
+	} else {
+		/* Put HW-CS into deselected state. */
+		cs &= ~BCM2835_SPI_CS_CSPOL;
+		/* Use the "undefined" chip-select as a precaution. */
+		cs |= BCM2835_SPI_CS_CS_10 | BCM2835_SPI_CS_CS_01;
+	}
+
+	bcm2835_wr(spim, BCM2835_SPI_CS, cs);
+}
+
+static int do_transfer_irq(struct rtdm_spi_remote_slave *slave)
+{
+	struct spi_master_bcm2835 *spim = to_master_bcm2835(slave);
+	int ret;
+	u32 cs;
+	
+	cs = bcm2835_rd(spim, BCM2835_SPI_CS);
+
+	cs &= ~BCM2835_SPI_CS_REN;
+	if ((slave->config.mode & SPI_3WIRE) && spim->rx_buf)
+		cs |= BCM2835_SPI_CS_REN;
+
+	cs |= BCM2835_SPI_CS_TA;
+
+	/*
+	 * Fill in the FIFO right away if we have a GPIO chip-select.
+	 * There have been rare events where the native CS flapped for
+	 * less than 1us, which may change the behaviour; with a GPIO
+	 * CS this does not happen, so the early fill is implemented
+	 * only for this case.
+	 */
+	if (gpio_is_valid(slave->cs_gpio)) {
+		/* Set dummy CS, ->chip_select() was not called. */
+		cs |= BCM2835_SPI_CS_CS_10 | BCM2835_SPI_CS_CS_01;
+		/* Enable SPI block, before filling FIFO. */
+		bcm2835_wr(spim, BCM2835_SPI_CS, cs);
+		bcm2835_wr_fifo(spim);
+	}
+
+	/* Enable interrupts last, wait for transfer completion. */
+	cs |= BCM2835_SPI_CS_INTR | BCM2835_SPI_CS_INTD;
+	bcm2835_wr(spim, BCM2835_SPI_CS, cs);
+
+	ret = rtdm_event_wait(&spim->transfer_done);
+	if (ret) {
+		bcm2835_reset_hw(spim);
+		return ret;
+	}
+
+	return 0;
+}
+
+static int bcm2835_transfer_iobufs(struct rtdm_spi_remote_slave *slave)
+{
+	struct spi_master_bcm2835 *spim = to_master_bcm2835(slave);
+	struct spi_slave_bcm2835 *bcm = to_slave_bcm2835(slave);
+
+	if (bcm->io_len == 0)
+		return -EINVAL;	/* No I/O buffers set. */
+	
+	spim->tx_len = bcm->io_len / 2;
+	spim->rx_len = spim->tx_len;
+	spim->tx_buf = bcm->io_virt + spim->rx_len;
+	spim->rx_buf = bcm->io_virt;
+
+	return do_transfer_irq(slave);
+}
+
+static int bcm2835_transfer_iobufs_n(struct rtdm_spi_remote_slave *slave,
+				     int len)
+{
+	struct spi_master_bcm2835 *spim = to_master_bcm2835(slave);
+	struct spi_slave_bcm2835 *bcm = to_slave_bcm2835(slave);
+
+	if ((bcm->io_len == 0) ||
+		(len <= 0) || (len > (bcm->io_len / 2)))
+		return -EINVAL;
+
+	spim->tx_len = len;
+	spim->rx_len = len;
+	spim->tx_buf = bcm->io_virt + bcm->io_len / 2;
+	spim->rx_buf = bcm->io_virt;
+
+	return do_transfer_irq(slave);
+}
+
+static ssize_t bcm2835_read(struct rtdm_spi_remote_slave *slave,
+			    void *rx, size_t len)
+{
+	struct spi_master_bcm2835 *spim = to_master_bcm2835(slave);
+
+	spim->tx_len = len;
+	spim->rx_len = len;
+	spim->tx_buf = NULL;
+	spim->rx_buf = rx;
+
+	return do_transfer_irq(slave) ?: len;
+}
+
+static ssize_t bcm2835_write(struct rtdm_spi_remote_slave *slave,
+			     const void *tx, size_t len)
+{
+	struct spi_master_bcm2835 *spim = to_master_bcm2835(slave);
+
+	spim->tx_len = len;
+	spim->rx_len = len;
+	spim->tx_buf = tx;
+	spim->rx_buf = NULL;
+
+	return do_transfer_irq(slave) ?: len;
+}
+
+static int set_iobufs(struct spi_slave_bcm2835 *bcm, size_t len)
+{
+	dma_addr_t dma;
+	void *p;
+
+	if (len == 0)
+		return -EINVAL;
+	
+	len = L1_CACHE_ALIGN(len) * 2;
+	if (len == bcm->io_len)
+		return 0;
+
+	if (bcm->io_len)
+		return -EINVAL;	/* I/O buffers may not be resized. */
+
+	/*
+	 * Since we need the I/O buffers to be set for starting a
+	 * transfer, there is no need for serializing this routine and
+	 * transfer_iobufs(), provided io_len is set last.
+	 *
+	 * NOTE: We don't need coherent memory until we actually get
+	 * DMA transfers working, this code is a bit ahead of
+	 * schedule.
+	 *
+	 * Revisit: this assumes DMA mask is 4Gb.
+	 */
+	p = dma_alloc_coherent(NULL, len, &dma, GFP_KERNEL);
+	if (p == NULL)
+		return -ENOMEM;
+
+	bcm->io_dma = dma;
+	bcm->io_virt = p;
+	smp_mb();
+	/*
+	 * May race with transfer_iobufs(), must be assigned after all
+	 * the rest is set up, enforcing a membar.
+	 */
+	bcm->io_len = len;
+	
+	return 0;
+}
+
+static int bcm2835_set_iobufs(struct rtdm_spi_remote_slave *slave,
+			      struct rtdm_spi_iobufs *p)
+{
+	struct spi_slave_bcm2835 *bcm = to_slave_bcm2835(slave);
+	int ret;
+
+	ret = set_iobufs(bcm, p->io_len);
+	if (ret)
+		return ret;
+
+	p->i_offset = 0;
+	p->o_offset = bcm->io_len / 2;
+	p->map_len = bcm->io_len;
+	
+	return 0;
+}
+
+static int bcm2835_mmap_iobufs(struct rtdm_spi_remote_slave *slave,
+			       struct vm_area_struct *vma)
+{
+	struct spi_slave_bcm2835 *bcm = to_slave_bcm2835(slave);
+
+	/*
+	 * dma_alloc_coherent() delivers non-cached memory, make sure
+	 * to return consistent mapping attributes. Typically, mixing
+	 * memory attributes across address spaces referring to the
+	 * same physical area is architecturally wrong on ARM.
+	 */
+	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
+
+	return rtdm_mmap_kmem(vma, bcm->io_virt);
+}
+
+static void bcm2835_mmap_release(struct rtdm_spi_remote_slave *slave)
+{
+	struct spi_slave_bcm2835 *bcm = to_slave_bcm2835(slave);
+
+	dma_free_coherent(NULL, bcm->io_len,
+			  bcm->io_virt, bcm->io_dma);
+	bcm->io_len = 0;
+}
+
+static int gpio_match_name(struct gpio_chip *chip, void *data)
+{
+	return !strcmp(chip->label, data);
+}
+
+static int find_cs_gpio(struct spi_device *spi)
+{
+	struct spi_master *kmaster = spi->master;
+	u32 pingroup_index, pin, pin_index;
+	struct device_node *pins;
+	struct gpio_chip *chip;
+	int ret;
+
+	if (gpio_is_valid(spi->cs_gpio)) {
+		dev_info(&spi->dev, "using GPIO%i for CS%d\n",
+			 spi->cs_gpio, spi->chip_select);
+		return 0;
+	}
+
+	/* Translate native CS to GPIO. */
+
+	for (pingroup_index = 0;
+	     (pins = of_parse_phandle(kmaster->dev.of_node,
+		     "pinctrl-0", pingroup_index)) != 0; pingroup_index++) {
+		for (pin_index = 0;
+		     of_property_read_u32_index(pins, "brcm,pins",
+				pin_index, &pin) == 0; pin_index++) {
+			if ((spi->chip_select == 0 &&
+			     (pin == 8 || pin == 36 || pin == 46)) ||
+			    (spi->chip_select == 1 &&
+			     (pin == 7 || pin == 35))) {
+				spi->cs_gpio = pin;
+				break;
+			}
+		}
+		of_node_put(pins);
+	}
+
+	/* If that failed, assume GPIOs 7-11 are used */
+	if (!gpio_is_valid(spi->cs_gpio) ) {
+		chip = gpiochip_find("pinctrl-bcm2835", gpio_match_name);
+		if (chip == NULL)
+			return 0;
+
+		spi->cs_gpio = chip->base + 8 - spi->chip_select;
+	}
+
+	dev_info(&spi->dev,
+		 "setting up native-CS%i as GPIO %i\n",
+		 spi->chip_select, spi->cs_gpio);
+
+	ret = gpio_direction_output(spi->cs_gpio,
+			    (spi->mode & SPI_CS_HIGH) ? 0 : 1);
+	if (ret) {
+		dev_err(&spi->dev,
+			"could not set CS%i gpio %i as output: %i",
+			spi->chip_select, spi->cs_gpio, ret);
+		return ret;
+	}
+
+	/*
+	 * Force value on GPIO in case the pin controller does not
+	 * handle that properly when switching to output mode.
+	 */
+	gpio_set_value(spi->cs_gpio, (spi->mode & SPI_CS_HIGH) ? 0 : 1);
+
+	return 0;
+}
+
+static struct rtdm_spi_remote_slave *
+bcm2835_attach_slave(struct rtdm_spi_master *master, struct spi_device *spi)
+{
+	struct spi_slave_bcm2835 *bcm;
+	int ret;
+
+	if (spi->chip_select > 1) {
+		/*
+		 * Error out when a native CS greater than 1 is
+		 * requested: officially there is a CS2, but it is not
+		 * documented which GPIO it is connected to...
+		 */
+		dev_err(&spi->dev,
+			"%s: only two native chip-selects are supported\n",
+			__func__);
+		return ERR_PTR(-EINVAL);
+	}
+
+	ret = find_cs_gpio(spi);
+	if (ret)
+		return ERR_PTR(ret);
+	
+	bcm = kzalloc(sizeof(*bcm), GFP_KERNEL);
+	if (bcm == NULL)
+		return ERR_PTR(-ENOMEM);
+
+	ret = rtdm_spi_add_remote_slave(&bcm->slave, master, spi);
+	if (ret) {
+		dev_err(&spi->dev,
+			"%s: failed to attach slave\n", __func__);
+		kfree(bcm);
+		return ERR_PTR(ret);
+	}
+
+	return &bcm->slave;
+}
+
+static void bcm2835_detach_slave(struct rtdm_spi_remote_slave *slave)
+{
+	struct spi_slave_bcm2835 *bcm = to_slave_bcm2835(slave);
+
+	rtdm_spi_remove_remote_slave(slave);
+	kfree(bcm);
+}
+
+static struct rtdm_spi_master_ops bcm2835_master_ops = {
+	.configure = bcm2835_configure,
+	.chip_select = bcm2835_chip_select,
+	.set_iobufs = bcm2835_set_iobufs,
+	.mmap_iobufs = bcm2835_mmap_iobufs,
+	.mmap_release = bcm2835_mmap_release,
+	.transfer_iobufs = bcm2835_transfer_iobufs,
+	.transfer_iobufs_n = bcm2835_transfer_iobufs_n,
+	.write = bcm2835_write,
+	.read = bcm2835_read,
+	.attach_slave = bcm2835_attach_slave,
+	.detach_slave = bcm2835_detach_slave,
+};
+
+static int bcm2835_spi_probe(struct platform_device *pdev)
+{
+	struct spi_master_bcm2835 *spim;
+	struct rtdm_spi_master *master;
+	struct spi_master *kmaster;
+	struct resource *r;
+	int ret, irq;
+
+	dev_dbg(&pdev->dev, "%s: entered\n", __func__);
+
+	master = rtdm_spi_alloc_master(&pdev->dev,
+		   struct spi_master_bcm2835, master);
+	if (master == NULL)
+		return -ENOMEM;
+
+	master->subclass = RTDM_SUBCLASS_BCM2835;
+	master->ops = &bcm2835_master_ops;
+	platform_set_drvdata(pdev, master);
+
+	kmaster = master->kmaster;
+	kmaster->mode_bits = BCM2835_SPI_MODE_BITS;
+	kmaster->bits_per_word_mask = SPI_BPW_MASK(8);
+	kmaster->num_chipselect = 2;
+	kmaster->dev.of_node = pdev->dev.of_node;
+
+	spim = container_of(master, struct spi_master_bcm2835, master);
+	rtdm_event_init(&spim->transfer_done, 0);
+
+	r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	spim->regs = devm_ioremap_resource(&pdev->dev, r);
+	if (IS_ERR(spim->regs)) {
+		dev_err(&pdev->dev, "%s: cannot map I/O memory\n", __func__);
+		ret = PTR_ERR(spim->regs);
+		goto fail;
+	}
+	
+	spim->clk = devm_clk_get(&pdev->dev, NULL);
+	if (IS_ERR(spim->clk)) {
+		ret = PTR_ERR(spim->clk);
+		goto fail;
+	}
+
+	spim->clk_hz = clk_get_rate(spim->clk);
+
+	irq = irq_of_parse_and_map(pdev->dev.of_node, 0);
+	if (irq <= 0) {
+		ret = irq ?: -ENODEV;
+		goto fail;
+	}
+
+	clk_prepare_enable(spim->clk);
+
+	/* Initialise the hardware with the default polarities */
+	bcm2835_wr(spim, BCM2835_SPI_CS,
+		   BCM2835_SPI_CS_CLEAR_RX | BCM2835_SPI_CS_CLEAR_TX);
+
+	ret = rtdm_irq_request(&spim->irqh, irq,
+			       bcm2835_spi_interrupt, 0,
+			       dev_name(&pdev->dev), spim);
+	if (ret) {
+		dev_err(&pdev->dev, "%s: cannot request IRQ%d\n",
+			__func__, irq);
+		goto fail_unclk;
+	}
+
+	ret = rtdm_spi_add_master(&spim->master);
+	if (ret) {
+		dev_err(&pdev->dev, "%s: failed to add master\n",
+			__func__);
+		goto fail_unclk;
+	}
+
+	return 0;
+
+fail_unclk:
+	clk_disable_unprepare(spim->clk);
+fail:
+	spi_master_put(kmaster);
+
+	return ret;
+}
+
+static int bcm2835_spi_remove(struct platform_device *pdev)
+{
+	struct rtdm_spi_master *master = platform_get_drvdata(pdev);
+	struct spi_master_bcm2835 *spim;
+
+	dev_dbg(&pdev->dev, "%s: entered\n", __func__);
+
+	spim = container_of(master, struct spi_master_bcm2835, master);
+
+	/* Clear FIFOs, and disable the HW block */
+	bcm2835_wr(spim, BCM2835_SPI_CS,
+		   BCM2835_SPI_CS_CLEAR_RX | BCM2835_SPI_CS_CLEAR_TX);
+
+	rtdm_irq_free(&spim->irqh);
+
+	clk_disable_unprepare(spim->clk);
+
+	rtdm_spi_remove_master(master);
+
+	return 0;
+}
+
+static const struct of_device_id bcm2835_spi_match[] = {
+	{
+		.compatible = "brcm,bcm2835-spi",
+	},
+	{ /* Sentinel */ },
+};
+MODULE_DEVICE_TABLE(of, bcm2835_spi_match);
+
+static struct platform_driver bcm2835_spi_driver = {
+	.driver		= {
+		.name		= "spi-bcm2835",
+		.of_match_table	= bcm2835_spi_match,
+	},
+	.probe		= bcm2835_spi_probe,
+	.remove		= bcm2835_spi_remove,
+};
+module_platform_driver(bcm2835_spi_driver);
+
+MODULE_LICENSE("GPL");
+++ linux-patched/drivers/xenomai/Kconfig	2022-03-21 12:58:31.421869147 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/drivers/xenomai/udd/Kconfig	1970-01-01 01:00:00.000000000 +0100
+menu "Drivers"
+
+config XENO_OPT_RTDM_COMPAT_DEVNODE
+	bool "Enable legacy pathnames for named RTDM devices"
+	default y
+	help
+	This compatibility option allows applications to open named
+	RTDM devices using the legacy naming scheme, i.e.
+
+	fd = open("devname", ...);
+	   or
+	fd = open("/dev/devname", ...);
+
+	When such a request is received by RTDM, a warning message is
+	issued to the kernel log whenever XENO_OPT_DEBUG_LEGACY is
+	also enabled in the kernel configuration.
+
+	Applications should open named devices via their actual device
+	nodes instead, i.e.
+
+	fd = open("/dev/rtdm/devname", ...);
+
+source "drivers/xenomai/autotune/Kconfig"
+source "drivers/xenomai/serial/Kconfig"
+source "drivers/xenomai/testing/Kconfig"
+source "drivers/xenomai/can/Kconfig"
+source "drivers/xenomai/net/Kconfig"
+source "drivers/xenomai/analogy/Kconfig"
+source "drivers/xenomai/ipc/Kconfig"
+source "drivers/xenomai/udd/Kconfig"
+source "drivers/xenomai/gpio/Kconfig"
+source "drivers/xenomai/gpiopwm/Kconfig"
+source "drivers/xenomai/spi/Kconfig"
+
+endmenu
+++ linux-patched/drivers/xenomai/udd/Kconfig	2022-03-21 12:58:31.414869215 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/drivers/xenomai/udd/udd.c	1970-01-01 01:00:00.000000000 +0100
+menu "UDD support"
+
+config XENO_DRIVERS_UDD
+	tristate "User-space device driver framework"
+	help
+
+	An RTDM-based driver providing interrupt control and I/O
+	memory access interfaces to user-space device drivers.
+
+endmenu
+++ linux-patched/drivers/xenomai/udd/udd.c	2022-03-21 12:58:31.406869293 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/drivers/xenomai/udd/Makefile	1970-01-01 01:00:00.000000000 +0100
+/*
+ * This file is part of the Xenomai project.
+ *
+ * Copyright (C) 2014 Philippe Gerum <rpm@xenomai.org>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/mm.h>
+#include <rtdm/cobalt.h>
+#include <rtdm/driver.h>
+#include <rtdm/udd.h>
+#include <pipeline/inband_work.h>
+
+struct udd_context {
+	u32 event_count;
+};
+
+static int udd_open(struct rtdm_fd *fd, int oflags)
+{
+	struct udd_context *context;
+	struct udd_device *udd;
+	int ret;
+
+	udd = container_of(rtdm_fd_device(fd), struct udd_device, __reserved.device);
+	if (udd->ops.open) {
+		ret = udd->ops.open(fd, oflags);
+		if (ret)
+			return ret;
+	}
+
+	context = rtdm_fd_to_private(fd);
+	context->event_count = 0;
+
+	return 0;
+}
+
+static void udd_close(struct rtdm_fd *fd)
+{
+	struct udd_device *udd;
+
+	udd = container_of(rtdm_fd_device(fd), struct udd_device, __reserved.device);
+	if (udd->ops.close)
+		udd->ops.close(fd);
+}
+
+static int udd_ioctl_rt(struct rtdm_fd *fd,
+			unsigned int request, void __user *arg)
+{
+	struct udd_signotify signfy;
+	struct udd_reserved *ur;
+	struct udd_device *udd;
+	rtdm_event_t done;
+	int ret;
+
+	udd = container_of(rtdm_fd_device(fd), struct udd_device, __reserved.device);
+	if (udd->ops.ioctl) {
+		ret = udd->ops.ioctl(fd, request, arg);
+		if (ret != -ENOSYS)
+			return ret;
+	}
+
+	ur = &udd->__reserved;
+
+	switch (request) {
+	case UDD_RTIOC_IRQSIG:
+		ret = rtdm_safe_copy_from_user(fd, &signfy, arg, sizeof(signfy));
+		if (ret)
+			return ret;
+		/* Early check; we will recheck each time a signal is issued. */
+		if (signfy.pid <= 0)
+			ur->signfy.pid = -1;
+		else {
+			if (signfy.sig < SIGRTMIN || signfy.sig > SIGRTMAX)
+				return -EINVAL;
+			if (cobalt_thread_find_local(signfy.pid) == NULL)
+				return -EINVAL;
+			ur->signfy = signfy;
+		}
+		break;
+	case UDD_RTIOC_IRQEN:
+	case UDD_RTIOC_IRQDIS:
+		if (udd->irq == UDD_IRQ_NONE || udd->irq == UDD_IRQ_CUSTOM)
+			return -EIO;
+		rtdm_event_init(&done, 0);
+		if (request == UDD_RTIOC_IRQEN)
+			udd_enable_irq(udd, &done);
+		else
+			udd_disable_irq(udd, &done);
+		ret = rtdm_event_wait(&done);
+		if (ret != -EIDRM)
+			rtdm_event_destroy(&done);
+		break;
+	default:
+		ret = -EINVAL;
+	}
+
+	return ret;
+}
+
+static ssize_t udd_read_rt(struct rtdm_fd *fd,
+			   void __user *buf, size_t len)
+{
+	struct udd_context *context;
+	struct udd_reserved *ur;
+	struct udd_device *udd;
+	rtdm_lockctx_t ctx;
+	ssize_t ret = 0;
+	u32 count;
+
+	if (len != sizeof(count))
+		return -EINVAL;
+
+	udd = container_of(rtdm_fd_device(fd), struct udd_device, __reserved.device);
+	if (udd->irq == UDD_IRQ_NONE)
+		return -EIO;
+
+	ur = &udd->__reserved;
+	context = rtdm_fd_to_private(fd);
+
+	cobalt_atomic_enter(ctx);
+
+	if (ur->event_count != context->event_count)
+		rtdm_event_clear(&ur->pulse);
+	else
+		ret = rtdm_event_wait(&ur->pulse);
+
+	count = ur->event_count;
+
+	cobalt_atomic_leave(ctx);
+
+	if (ret)
+		return ret;
+
+	context->event_count = count;
+	ret = rtdm_copy_to_user(fd, buf, &count, sizeof(count));
+
+	return ret ?: sizeof(count);
+}
+
+static ssize_t udd_write_rt(struct rtdm_fd *fd,
+			    const void __user *buf, size_t len)
+{
+	int ret;
+	u32 val;
+
+	if (len != sizeof(val))
+		return -EINVAL;
+
+	ret = rtdm_safe_copy_from_user(fd, &val, buf, sizeof(val));
+	if (ret)
+		return ret;
+
+	ret = udd_ioctl_rt(fd, val ? UDD_RTIOC_IRQEN : UDD_RTIOC_IRQDIS, NULL);
+
+	return ret ?: len;
+}
+
+static int udd_select(struct rtdm_fd *fd, struct xnselector *selector,
+		      unsigned int type, unsigned int index)
+{
+	struct udd_device *udd;
+
+	udd = container_of(rtdm_fd_device(fd), struct udd_device, __reserved.device);
+	if (udd->irq == UDD_IRQ_NONE)
+		return -EIO;
+
+	return rtdm_event_select(&udd->__reserved.pulse,
+				 selector, type, index);
+}
+
+static int udd_irq_handler(rtdm_irq_t *irqh)
+{
+	struct udd_device *udd;
+	int ret;
+
+	udd = rtdm_irq_get_arg(irqh, struct udd_device);
+	ret = udd->ops.interrupt(udd);
+	if (ret == RTDM_IRQ_HANDLED)
+		udd_notify_event(udd);
+
+	return ret;
+}
+
+static int mapper_open(struct rtdm_fd *fd, int oflags)
+{
+	int minor = rtdm_fd_minor(fd);
+	struct udd_device *udd;
+
+	/*
+	 * Check that we are opening a mapper instance pointing at a
+	 * valid memory region. e.g. UDD creates the companion device
+	 * "foo,mapper" on the fly when registering the main device
+	 * "foo". Userland may then open("/dev/foo,mapper0", ...)
+	 * followed by a call to mmap() for mapping the memory region
+	 * #0 as declared in the mem_regions[] array of the main
+	 * device.
+	 *
+	 * We support sparse region arrays, so the device minor shall
+	 * match the mem_regions[] index exactly.
+	 */
+	if (minor < 0 || minor >= UDD_NR_MAPS)
+		return -EIO;
+
+	udd = udd_get_device(fd);
+	if (udd->mem_regions[minor].type == UDD_MEM_NONE)
+		return -EIO;
+
+	return 0;
+}
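+
+/*
+ * Illustrative userland usage (not part of this driver; "foo" and
+ * region_len are placeholders): with a main device registered as
+ * "foo" and mem_regions[0] populated, region #0 would typically be
+ * mapped as follows:
+ *
+ *	fd = open("/dev/rtdm/foo,mapper0", O_RDWR);
+ *	p = mmap(NULL, region_len, PROT_READ | PROT_WRITE,
+ *		 MAP_SHARED, fd, 0);
+ */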
+
+static void mapper_close(struct rtdm_fd *fd)
+{
+	/* nop */
+}
+
+static int mapper_mmap(struct rtdm_fd *fd, struct vm_area_struct *vma)
+{
+	struct udd_memregion *rn;
+	struct udd_device *udd;
+	size_t len;
+	int ret;
+
+	udd = udd_get_device(fd);
+	if (udd->ops.mmap)
+		/* Offload to client driver if handler is present. */
+		return udd->ops.mmap(fd, vma);
+
+	/* Otherwise DIY using the RTDM helpers. */
+
+	len = vma->vm_end - vma->vm_start;
+	rn = udd->mem_regions + rtdm_fd_minor(fd);
+	if (rn->len < len)
+		/* Can't map that much, bail out. */
+		return -EINVAL;
+
+	switch (rn->type) {
+	case UDD_MEM_PHYS:
+		ret = rtdm_mmap_iomem(vma, rn->addr);
+		break;
+	case UDD_MEM_LOGICAL:
+		ret = rtdm_mmap_kmem(vma, (void *)rn->addr);
+		break;
+	case UDD_MEM_VIRTUAL:
+		ret = rtdm_mmap_vmem(vma, (void *)rn->addr);
+		break;
+	default:
+		ret = -EINVAL;	/* Paranoid, can't happen. */
+	}
+
+	return ret;
+}
+
+static inline int check_memregion(struct udd_device *udd,
+				  struct udd_memregion *rn)
+{
+	if (rn->name == NULL)
+		return -EINVAL;
+
+	if (rn->addr == 0)
+		return -EINVAL;
+
+	if (rn->len == 0)
+		return -EINVAL;
+
+	return 0;
+}
+
+static inline int register_mapper(struct udd_device *udd)
+{
+	struct udd_reserved *ur = &udd->__reserved;
+	struct rtdm_driver *drv = &ur->mapper_driver;
+	struct udd_mapper *mapper;
+	struct udd_memregion *rn;
+	int n, ret;
+
+	ur->mapper_name = kasformat("%s,mapper%%d", udd->device_name);
+	if (ur->mapper_name == NULL)
+		return -ENOMEM;
+
+	drv->profile_info = (struct rtdm_profile_info)
+		RTDM_PROFILE_INFO(mapper, RTDM_CLASS_MEMORY,
+				  RTDM_SUBCLASS_GENERIC, 0);
+	drv->device_flags = RTDM_NAMED_DEVICE|RTDM_FIXED_MINOR;
+	drv->device_count = UDD_NR_MAPS;
+	drv->base_minor = 0;
+	drv->ops = (struct rtdm_fd_ops){
+		.open		=	mapper_open,
+		.close		=	mapper_close,
+		.mmap		=	mapper_mmap,
+	};
+
+	for (n = 0, mapper = ur->mapdev; n < UDD_NR_MAPS; n++, mapper++) {
+		rn = udd->mem_regions + n;
+		if (rn->type == UDD_MEM_NONE)
+			continue;
+		mapper->dev.driver = drv;
+		mapper->dev.label = ur->mapper_name;
+		mapper->dev.minor = n;
+		mapper->udd = udd;
+		ret = rtdm_dev_register(&mapper->dev);
+		if (ret)
+			goto undo;
+	}
+
+	return 0;
+undo:
+	while (--n >= 0)
+		rtdm_dev_unregister(&ur->mapdev[n].dev);
+
+	return ret;
+}
+
+/**
+ * @brief Register a UDD device
+ *
+ * This routine registers a mini-driver at the UDD core.
+ *
+ * @param udd @ref udd_device "UDD device descriptor" which should
+ * describe the new device properties.
+ *
+ * @return Zero is returned upon success, otherwise a negative error
+ * code is returned, from the set of error codes defined by
+ * rtdm_dev_register(). In addition, the following error codes can be
+ * returned:
+ *
+ * - -EINVAL, some of the memory regions declared in the
+ *   udd_device.mem_regions[] array have invalid properties, i.e. bad
+ *   type, NULL name, zero length or address. Any undeclared region
+ *   entry from the array must bear the UDD_MEM_NONE type.
+ *
+ * - -EINVAL, if udd_device.irq is different from UDD_IRQ_CUSTOM and
+ * UDD_IRQ_NONE but invalid, causing rtdm_irq_request() to fail.
+ *
+ * - -EINVAL, if udd_device.device_flags contains invalid flags.
+ *
+ * - -ENOSYS, if this service is called while the real-time core is disabled.
+ *
+ * @coretags{secondary-only}
+ */
+int udd_register_device(struct udd_device *udd)
+{
+	struct rtdm_device *dev = &udd->__reserved.device;
+	struct udd_reserved *ur = &udd->__reserved;
+	struct rtdm_driver *drv = &ur->driver;
+	struct udd_memregion *rn;
+	int ret, n;
+
+	if (udd->device_flags & RTDM_PROTOCOL_DEVICE)
+		return -EINVAL;
+
+	if (udd->irq != UDD_IRQ_NONE && udd->irq != UDD_IRQ_CUSTOM &&
+	    udd->ops.interrupt == NULL)
+		return -EINVAL;
+
+	for (n = 0, ur->nr_maps = 0; n < UDD_NR_MAPS; n++) {
+		/* We allow sparse region arrays. */
+		rn = udd->mem_regions + n;
+		if (rn->type == UDD_MEM_NONE)
+			continue;
+		ret = check_memregion(udd, rn);
+		if (ret)
+			return ret;
+		udd->__reserved.nr_maps++;
+	}
+
+	drv->profile_info = (struct rtdm_profile_info)
+		RTDM_PROFILE_INFO(udd->device_name, RTDM_CLASS_UDD,
+				  udd->device_subclass, 0);
+	drv->device_flags = RTDM_NAMED_DEVICE|udd->device_flags;
+	drv->device_count = 1;
+	drv->context_size = sizeof(struct udd_context);
+	drv->ops = (struct rtdm_fd_ops){
+		.open = udd_open,
+		.ioctl_rt = udd_ioctl_rt,
+		.read_rt = udd_read_rt,
+		.write_rt = udd_write_rt,
+		.close = udd_close,
+		.select = udd_select,
+	};
+
+	dev->driver = drv;
+	dev->label = udd->device_name;
+
+	ret = rtdm_dev_register(dev);
+	if (ret)
+		return ret;
+
+	if (ur->nr_maps > 0) {
+		ret = register_mapper(udd);
+		if (ret)
+			goto fail_mapper;
+	} else
+		ur->mapper_name = NULL;
+
+	ur->event_count = 0;
+	rtdm_event_init(&ur->pulse, 0);
+	ur->signfy.pid = -1;
+
+	if (udd->irq != UDD_IRQ_NONE && udd->irq != UDD_IRQ_CUSTOM) {
+		ret = rtdm_irq_request(&ur->irqh, udd->irq,
+				       udd_irq_handler, 0,
+				       dev->name, udd);
+		if (ret)
+			goto fail_irq_request;
+	}
+
+	return 0;
+
+fail_irq_request:
+	for (n = 0; n < UDD_NR_MAPS; n++) {
+		rn = udd->mem_regions + n;
+		if (rn->type != UDD_MEM_NONE)
+			rtdm_dev_unregister(&ur->mapdev[n].dev);
+	}
+fail_mapper:
+	rtdm_dev_unregister(dev);
+	if (ur->mapper_name)
+		kfree(ur->mapper_name);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(udd_register_device);
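+
+/*
+ * Minimal registration sketch (illustrative only; foo_interrupt and
+ * FOO_IRQ are placeholders for a mini-driver's own handler and IRQ
+ * number):
+ *
+ *	static struct udd_device foo_udd = {
+ *		.device_name = "foo",
+ *		.irq = FOO_IRQ,
+ *		.ops = {
+ *			.interrupt = foo_interrupt,
+ *		},
+ *	};
+ *
+ *	ret = udd_register_device(&foo_udd);
+ */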
+
+/**
+ * @brief Unregister a UDD device
+ *
+ * This routine unregisters a mini-driver from the UDD core. This
+ * routine waits until all connections to @a udd have been closed
+ * prior to unregistering.
+ *
+ * @param udd UDD device descriptor
+ *
+ * @return Zero is returned upon success, otherwise -ENXIO is returned
+ * if this service is called while the Cobalt kernel is disabled.
+ *
+ * @coretags{secondary-only}
+ */
+int udd_unregister_device(struct udd_device *udd)
+{
+	struct udd_reserved *ur = &udd->__reserved;
+	struct udd_memregion *rn;
+	int n;
+
+	rtdm_event_destroy(&ur->pulse);
+
+	if (udd->irq != UDD_IRQ_NONE && udd->irq != UDD_IRQ_CUSTOM)
+		rtdm_irq_free(&ur->irqh);
+
+	for (n = 0; n < UDD_NR_MAPS; n++) {
+		rn = udd->mem_regions + n;
+		if (rn->type != UDD_MEM_NONE)
+			rtdm_dev_unregister(&ur->mapdev[n].dev);
+	}
+
+	if (ur->mapper_name)
+		kfree(ur->mapper_name);
+
+	rtdm_dev_unregister(&ur->device);
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(udd_unregister_device);
+
+/**
+ * @brief Notify an IRQ event for an unmanaged interrupt
+ *
+ * When the UDD core hands over the interrupt management for a device
+ * to the mini-driver (see UDD_IRQ_CUSTOM), the latter should notify
+ * the UDD core of incoming IRQ events by calling this service.
+ *
+ * As a result, the UDD core wakes up any Cobalt thread waiting for
+ * interrupts on the device via a read(2) or select(2) call.
+ *
+ * @param udd UDD device descriptor receiving the IRQ.
+ *
+ * @coretags{coreirq-only}
+ *
+ * @note In case the @ref udd_irq_handler "IRQ handler" from the
+ * mini-driver requested the UDD core not to re-enable the interrupt
+ * line, the application may later request the unmasking by issuing
+ * the UDD_RTIOC_IRQEN ioctl(2) command. Writing a non-zero integer to
+ * the device via the write(2) system call has the same effect.
+ */
+void udd_notify_event(struct udd_device *udd)
+{
+	struct udd_reserved *ur = &udd->__reserved;
+	union sigval sival;
+	rtdm_lockctx_t ctx;
+
+	cobalt_atomic_enter(ctx);
+	ur->event_count++;
+	rtdm_event_signal(&ur->pulse);
+	cobalt_atomic_leave(ctx);
+
+	if (ur->signfy.pid > 0) {
+		sival.sival_int = (int)ur->event_count;
+		__cobalt_sigqueue(ur->signfy.pid, ur->signfy.sig, &sival);
+	}
+}
+EXPORT_SYMBOL_GPL(udd_notify_event);
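+
+/*
+ * Illustration for the UDD_IRQ_CUSTOM case (foo_isr and foo_udd are
+ * placeholders): a mini-driver managing its own interrupt would
+ * typically end its handler with a notification to the UDD core:
+ *
+ *	static int foo_isr(rtdm_irq_t *irqh)
+ *	{
+ *		(... acknowledge the device ...)
+ *		udd_notify_event(&foo_udd);
+ *		return RTDM_IRQ_HANDLED;
+ *	}
+ */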
+
+struct irqswitch_work {
+	struct pipeline_inband_work inband_work;
+	rtdm_irq_t *irqh;
+	int enabled;
+	rtdm_event_t *done;
+	struct irqswitch_work *self; /* Revisit: I-pipe requirement */
+};
+
+static void lostage_irqswitch_line(struct pipeline_inband_work *inband_work)
+{
+	struct irqswitch_work *rq;
+
+	/*
+	 * This runs from secondary mode, we may flip the IRQ state
+	 * now.
+	 */
+	rq = container_of(inband_work, struct irqswitch_work, inband_work);
+	if (rq->enabled)
+		rtdm_irq_enable(rq->irqh);
+	else
+		rtdm_irq_disable(rq->irqh);
+
+	if (rq->done)
+		rtdm_event_signal(rq->done);
+
+	xnfree(rq->self);
+}
+
+static void switch_irq_line(rtdm_irq_t *irqh, int enable, rtdm_event_t *done)
+{
+	struct irqswitch_work *rq;
+
+	rq = xnmalloc(sizeof(*rq));
+	if (WARN_ON(rq == NULL))
+		return;
+
+	rq->inband_work = (struct pipeline_inband_work)
+		PIPELINE_INBAND_WORK_INITIALIZER(*rq,
+					lostage_irqswitch_line);
+	rq->irqh = irqh;
+	rq->enabled = enable;
+	rq->done = done;
+	rq->self = rq;	/* Revisit: I-pipe requirement */
+
+	/*
+	 * Not pretty, but we may not traverse the kernel code for
+	 * enabling/disabling IRQ lines from primary mode. Defer this
+	 * to the root context.
+	 */
+	pipeline_post_inband_work(rq);
+}
+
+/**
+ * @brief Enable the device IRQ line
+ *
+ * This service issues a request to the regular kernel for enabling
+ * the IRQ line registered by the driver. If the caller runs in
+ * primary mode, the request is scheduled but deferred until the
+ * current CPU leaves the real-time domain (see note). Otherwise, the
+ * request is immediately handled.
+ *
+ * @param udd The UDD driver handling the IRQ to enable. If no IRQ
+ * was registered by the driver at the UDD core, this routine has no
+ * effect.
+ *
+ * @param done Optional event to signal upon completion. If non-NULL,
+ * @a done will be posted by a call to rtdm_event_signal() after the
+ * interrupt line is enabled.
+ *
+ * @coretags{unrestricted}
+ *
+ * @note The deferral is required as some interrupt management code
+ * involved in enabling interrupt lines may not be safely executed
+ * from primary mode. By passing a valid @a done object address, the
+ * caller can wait for the request to complete, by sleeping on
+ * rtdm_event_wait().
+ */
+void udd_enable_irq(struct udd_device *udd, rtdm_event_t *done)
+{
+	struct udd_reserved *ur = &udd->__reserved;
+
+	if (udd->irq != UDD_IRQ_NONE && udd->irq != UDD_IRQ_CUSTOM)
+		switch_irq_line(&ur->irqh, 1, done);
+}
+EXPORT_SYMBOL_GPL(udd_enable_irq);
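+
+/*
+ * Synchronous usage sketch, mirroring the pattern used by
+ * udd_ioctl_rt() above:
+ *
+ *	rtdm_event_t done;
+ *
+ *	rtdm_event_init(&done, 0);
+ *	udd_enable_irq(udd, &done);
+ *	rtdm_event_wait(&done);
+ *	rtdm_event_destroy(&done);
+ */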
+
+/**
+ * @brief Disable the device IRQ line
+ *
+ * This service issues a request to the regular kernel for disabling
+ * the IRQ line registered by the driver. If the caller runs in
+ * primary mode, the request is scheduled but deferred until the
+ * current CPU leaves the real-time domain (see note). Otherwise, the
+ * request is immediately handled.
+ *
+ * @param udd The UDD driver handling the IRQ to disable. If no IRQ
+ * was registered by the driver at the UDD core, this routine has no
+ * effect.
+ *
+ * @param done Optional event to signal upon completion. If non-NULL,
+ * @a done will be posted by a call to rtdm_event_signal() after the
+ * interrupt line is disabled.
+ *
+ * @coretags{unrestricted}
+ *
+ * @note The deferral is required as some interrupt management code
+ * involved in disabling interrupt lines may not be safely executed
+ * from primary mode. By passing a valid @a done object address, the
+ * caller can wait for the request to complete, by sleeping on
+ * rtdm_event_wait().
+ */
+void udd_disable_irq(struct udd_device *udd, rtdm_event_t *done)
+{
+	struct udd_reserved *ur = &udd->__reserved;
+
+	if (udd->irq != UDD_IRQ_NONE && udd->irq != UDD_IRQ_CUSTOM)
+		switch_irq_line(&ur->irqh, 0, done);
+}
+EXPORT_SYMBOL_GPL(udd_disable_irq);
+
+/**
+ * @brief RTDM file descriptor to target UDD device
+ *
+ * Retrieves the UDD device from an RTDM file descriptor.
+ *
+ * @param fd File descriptor received by an ancillary I/O handler
+ * from a mini-driver based on the UDD core.
+ *
+ * @return A pointer to the UDD device @a fd refers to.
+ *
+ * @note This service is intended for use by mini-drivers based on the
+ * UDD core exclusively. Passing file descriptors referring to other
+ * RTDM devices will certainly lead to invalid results.
+ *
+ * @coretags{mode-unrestricted}
+ */
+struct udd_device *udd_get_device(struct rtdm_fd *fd)
+{
+	struct rtdm_device *dev = rtdm_fd_device(fd);
+
+	if (dev->driver->profile_info.class_id == RTDM_CLASS_MEMORY)
+		return container_of(dev, struct udd_mapper, dev)->udd;
+
+	return container_of(dev, struct udd_device, __reserved.device);
+}
+EXPORT_SYMBOL_GPL(udd_get_device);
+
+MODULE_LICENSE("GPL");
+++ linux-patched/drivers/xenomai/udd/Makefile	2022-03-21 12:58:31.399869361 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:2 @
--- linux/drivers/xenomai/testing/timerbench.c	1970-01-01 01:00:00.000000000 +0100
+ccflags-y += -I$(srctree)/kernel
+
+obj-$(CONFIG_XENO_DRIVERS_UDD) += xeno_udd.o
+
+xeno_udd-y := udd.o
+++ linux-patched/drivers/xenomai/testing/timerbench.c	2022-03-21 12:58:31.392869429 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/drivers/xenomai/testing/rtdmtest.c	1970-01-01 01:00:00.000000000 +0100
+/*
+ * Copyright (C) 2005 Jan Kiszka <jan.kiszka@web.de>.
+ *
+ * Xenomai is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * Xenomai is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Xenomai; if not, write to the Free Software Foundation,
+ * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/semaphore.h>
+#include <cobalt/kernel/trace.h>
+#include <cobalt/kernel/arith.h>
+#include <rtdm/testing.h>
+#include <rtdm/driver.h>
+#include <rtdm/compat.h>
+
+MODULE_DESCRIPTION("Timer latency test helper");
+MODULE_AUTHOR("Jan Kiszka <jan.kiszka@web.de>");
+MODULE_VERSION("0.2.1");
+MODULE_LICENSE("GPL");
+
+struct rt_tmbench_context {
+	int mode;
+	unsigned int period;
+	int freeze_max;
+	int warmup_loops;
+	int samples_per_sec;
+	int32_t *histogram_min;
+	int32_t *histogram_max;
+	int32_t *histogram_avg;
+	int histogram_size;
+	int bucketsize;
+
+	rtdm_task_t timer_task;
+
+	rtdm_timer_t timer;
+	int warmup;
+	uint64_t start_time;
+	uint64_t date;
+	struct rttst_bench_res curr;
+
+	rtdm_event_t result_event;
+	struct rttst_interm_bench_res result;
+
+	struct semaphore nrt_mutex;
+};
+
+static inline void add_histogram(struct rt_tmbench_context *ctx,
+				 __s32 *histogram, __s32 addval)
+{
+	/* bucketsize steps */
+	int inabs = (addval >= 0 ? addval : -addval) / ctx->bucketsize;
+	histogram[inabs < ctx->histogram_size ?
+		  inabs : ctx->histogram_size - 1]++;
+}
+
+static inline long long slldiv(long long s, unsigned d)
+{
+	return s >= 0 ? xnarch_ulldiv(s, d, NULL) : -xnarch_ulldiv(-s, d, NULL);
+}
+
+static void eval_inner_loop(struct rt_tmbench_context *ctx, __s32 dt)
+{
+	if (dt > ctx->curr.max)
+		ctx->curr.max = dt;
+	if (dt < ctx->curr.min)
+		ctx->curr.min = dt;
+	ctx->curr.avg += dt;
+
+	if (xntrace_enabled() &&
+		ctx->freeze_max &&
+		(dt > ctx->result.overall.max) &&
+		!ctx->warmup) {
+		ctx->result.overall.max = dt;
+		xntrace_latpeak_freeze(dt);
+	}
+
+	ctx->date += ctx->period;
+
+	if (!ctx->warmup && ctx->histogram_size)
+		add_histogram(ctx, ctx->histogram_avg, dt);
+
+	/* Evaluate overruns and adjust next release date.
+	   Beware of signedness! */
+	while (dt > 0 && (unsigned long)dt > ctx->period) {
+		ctx->curr.overruns++;
+		ctx->date += ctx->period;
+		dt -= ctx->period;
+	}
+}
+
+static void eval_outer_loop(struct rt_tmbench_context *ctx)
+{
+	if (!ctx->warmup) {
+		if (ctx->histogram_size) {
+			add_histogram(ctx, ctx->histogram_max, ctx->curr.max);
+			add_histogram(ctx, ctx->histogram_min, ctx->curr.min);
+		}
+
+		ctx->result.last.min = ctx->curr.min;
+		if (ctx->curr.min < ctx->result.overall.min)
+			ctx->result.overall.min = ctx->curr.min;
+
+		ctx->result.last.max = ctx->curr.max;
+		if (ctx->curr.max > ctx->result.overall.max)
+			ctx->result.overall.max = ctx->curr.max;
+
+		ctx->result.last.avg =
+		    slldiv(ctx->curr.avg, ctx->samples_per_sec);
+		ctx->result.overall.avg += ctx->result.last.avg;
+		ctx->result.overall.overruns += ctx->curr.overruns;
+		rtdm_event_pulse(&ctx->result_event);
+	}
+
+	if (ctx->warmup &&
+	    (ctx->result.overall.test_loops == ctx->warmup_loops)) {
+		ctx->result.overall.test_loops = 0;
+		ctx->warmup = 0;
+	}
+
+	ctx->curr.min = 10000000;
+	ctx->curr.max = -10000000;
+	ctx->curr.avg = 0;
+	ctx->curr.overruns = 0;
+
+	ctx->result.overall.test_loops++;
+}
+
+static void timer_task_proc(void *arg)
+{
+	struct rt_tmbench_context *ctx = arg;
+	int count, err;
+	spl_t s;
+
+	/* first event: one millisecond from now. */
+	ctx->date = rtdm_clock_read_monotonic() + 1000000;
+
+	while (1) {
+		for (count = 0; count < ctx->samples_per_sec; count++) {
+			cobalt_atomic_enter(s);
+			ctx->start_time = rtdm_clock_read_monotonic();
+			err = rtdm_task_sleep_abs(ctx->date,
+						  RTDM_TIMERMODE_ABSOLUTE);
+			cobalt_atomic_leave(s);
+			if (err)
+				return;
+
+			eval_inner_loop(ctx,
+					(__s32)(rtdm_clock_read_monotonic() -
+						ctx->date));
+		}
+		eval_outer_loop(ctx);
+	}
+}
+
+static void timer_proc(rtdm_timer_t *timer)
+{
+	struct rt_tmbench_context *ctx =
+	    container_of(timer, struct rt_tmbench_context, timer);
+	int err;
+
+	do {
+		eval_inner_loop(ctx, (__s32)(rtdm_clock_read_monotonic() -
+					     ctx->date));
+
+		ctx->start_time = rtdm_clock_read_monotonic();
+		err = rtdm_timer_start_in_handler(&ctx->timer, ctx->date, 0,
+						  RTDM_TIMERMODE_ABSOLUTE);
+
+		if (++ctx->curr.test_loops >= ctx->samples_per_sec) {
+			ctx->curr.test_loops = 0;
+			eval_outer_loop(ctx);
+		}
+	} while (err);
+}
+
+static int rt_tmbench_open(struct rtdm_fd *fd, int oflags)
+{
+	struct rt_tmbench_context *ctx;
+
+	ctx = rtdm_fd_to_private(fd);
+
+	ctx->mode = RTTST_TMBENCH_INVALID;
+	sema_init(&ctx->nrt_mutex, 1);
+
+	return 0;
+}
+
+static void rt_tmbench_close(struct rtdm_fd *fd)
+{
+	struct rt_tmbench_context *ctx;
+
+	ctx = rtdm_fd_to_private(fd);
+
+	down(&ctx->nrt_mutex);
+
+	if (ctx->mode >= 0) {
+		if (ctx->mode == RTTST_TMBENCH_TASK)
+			rtdm_task_destroy(&ctx->timer_task);
+		else if (ctx->mode == RTTST_TMBENCH_HANDLER)
+			rtdm_timer_destroy(&ctx->timer);
+
+		rtdm_event_destroy(&ctx->result_event);
+
+		if (ctx->histogram_size)
+			kfree(ctx->histogram_min);
+
+		ctx->mode = RTTST_TMBENCH_INVALID;
+		ctx->histogram_size = 0;
+	}
+
+	up(&ctx->nrt_mutex);
+}
+
+static int rt_tmbench_start(struct rtdm_fd *fd,
+			    struct rt_tmbench_context *ctx,
+			    struct rttst_tmbench_config __user *user_config)
+{
+	int err = 0;
+	spl_t s;
+
+	struct rttst_tmbench_config config_buf;
+	struct rttst_tmbench_config *config =
+		(struct rttst_tmbench_config *)user_config;
+
+	if (rtdm_fd_is_user(fd)) {
+		if (rtdm_safe_copy_from_user
+		    (fd, &config_buf, user_config,
+		     sizeof(struct rttst_tmbench_config)) < 0)
+			return -EFAULT;
+
+		config = &config_buf;
+	}
+
+	down(&ctx->nrt_mutex);
+
+	ctx->period = config->period;
+	ctx->warmup_loops = config->warmup_loops;
+	ctx->samples_per_sec = 1000000000 / ctx->period;
+	ctx->histogram_size = config->histogram_size;
+	ctx->freeze_max = config->freeze_max;
+
+	if (ctx->histogram_size > 0) {
+		ctx->histogram_min =
+		    kmalloc(3 * ctx->histogram_size * sizeof(int32_t),
+			    GFP_KERNEL);
+		ctx->histogram_max =
+		    ctx->histogram_min + config->histogram_size;
+		ctx->histogram_avg =
+		    ctx->histogram_max + config->histogram_size;
+
+		if (!ctx->histogram_min) {
+			up(&ctx->nrt_mutex);
+			return -ENOMEM;
+		}
+
+		memset(ctx->histogram_min, 0,
+		       3 * ctx->histogram_size * sizeof(int32_t));
+		ctx->bucketsize = config->histogram_bucketsize;
+	}
+
+	ctx->result.overall.min = 10000000;
+	ctx->result.overall.max = -10000000;
+	ctx->result.overall.avg = 0;
+	ctx->result.overall.test_loops = 1;
+	ctx->result.overall.overruns = 0;
+
+	ctx->warmup = 1;
+
+	ctx->curr.min = 10000000;
+	ctx->curr.max = -10000000;
+	ctx->curr.avg = 0;
+	ctx->curr.overruns = 0;
+	ctx->mode = RTTST_TMBENCH_INVALID;
+
+	rtdm_event_init(&ctx->result_event, 0);
+
+	if (config->mode == RTTST_TMBENCH_TASK) {
+		err = rtdm_task_init(&ctx->timer_task, "timerbench",
+				timer_task_proc, ctx,
+				config->priority, 0);
+		if (!err)
+			ctx->mode = RTTST_TMBENCH_TASK;
+	} else {
+		rtdm_timer_init(&ctx->timer, timer_proc,
+				rtdm_fd_device(fd)->name);
+
+		ctx->curr.test_loops = 0;
+
+		ctx->mode = RTTST_TMBENCH_HANDLER;
+
+		cobalt_atomic_enter(s);
+		ctx->start_time = rtdm_clock_read_monotonic();
+
+		/* first event: one millisecond from now. */
+		ctx->date = ctx->start_time + 1000000;
+
+		err = rtdm_timer_start(&ctx->timer, ctx->date, 0,
+				RTDM_TIMERMODE_ABSOLUTE);
+		cobalt_atomic_leave(s);
+	}
+
+	up(&ctx->nrt_mutex);
+
+	return err;
+}
+
+static int kernel_copy_results(struct rt_tmbench_context *ctx,
+			       struct rttst_overall_bench_res *res)
+{
+	int size;
+
+	memcpy(&res->result, &ctx->result.overall, sizeof(res->result));
+
+	if (ctx->histogram_size > 0) {
+		size = ctx->histogram_size * sizeof(int32_t);
+		memcpy(res->histogram_min, ctx->histogram_min, size);
+		memcpy(res->histogram_max, ctx->histogram_max, size);
+		memcpy(res->histogram_avg, ctx->histogram_avg, size);
+		kfree(ctx->histogram_min);
+	}
+
+	return 0;
+}
+
+static int user_copy_results(struct rt_tmbench_context *ctx,
+			     struct rttst_overall_bench_res __user *u_res)
+{
+	struct rtdm_fd *fd = rtdm_private_to_fd(ctx);
+	struct rttst_overall_bench_res res_buf;
+	int ret, size;
+
+	ret = rtdm_safe_copy_to_user(fd, &u_res->result,
+				     &ctx->result.overall,
+				     sizeof(u_res->result));
+	if (ret || ctx->histogram_size == 0)
+		return ret;
+
+	size = ctx->histogram_size * sizeof(int32_t);
+
+	if (rtdm_safe_copy_from_user(fd, &res_buf, u_res, sizeof(res_buf)) < 0 ||
+	    rtdm_safe_copy_to_user(fd, res_buf.histogram_min,
+				   ctx->histogram_min, size) < 0 ||
+	    rtdm_safe_copy_to_user(fd, res_buf.histogram_max,
+				   ctx->histogram_max, size) < 0 ||
+	    rtdm_safe_copy_to_user(fd, res_buf.histogram_avg,
+				   ctx->histogram_avg, size) < 0)
+		return -EFAULT;
+
+	return 0;
+}
+
+#ifdef CONFIG_XENO_ARCH_SYS3264
+
+static int compat_user_copy_results(struct rt_tmbench_context *ctx,
+				    struct compat_rttst_overall_bench_res __user *u_res)
+{
+	struct compat_rttst_overall_bench_res res_buf;
+	struct rtdm_fd *fd = rtdm_private_to_fd(ctx);
+	int ret, size;
+
+	ret = rtdm_safe_copy_to_user(fd, &u_res->result,
+				     &ctx->result.overall,
+				     sizeof(u_res->result));
+	if (ret || ctx->histogram_size == 0)
+		return ret;
+
+	size = ctx->histogram_size * sizeof(int32_t);
+
+	if (rtdm_safe_copy_from_user(fd, &res_buf, u_res, sizeof(res_buf)) < 0 ||
+	    rtdm_safe_copy_to_user(fd, compat_ptr(res_buf.histogram_min),
+				   ctx->histogram_min, size) < 0 ||
+	    rtdm_safe_copy_to_user(fd, compat_ptr(res_buf.histogram_max),
+				   ctx->histogram_max, size) < 0 ||
+	    rtdm_safe_copy_to_user(fd, compat_ptr(res_buf.histogram_avg),
+				   ctx->histogram_avg, size) < 0)
+		return -EFAULT;
+
+	return 0;
+}
+
+#endif /* CONFIG_XENO_ARCH_SYS3264 */
+
+static int rt_tmbench_stop(struct rt_tmbench_context *ctx, void *u_res)
+{
+	struct rtdm_fd *fd = rtdm_private_to_fd(ctx);
+	int ret;
+
+	down(&ctx->nrt_mutex);
+
+	if (ctx->mode < 0) {
+		up(&ctx->nrt_mutex);
+		return -EINVAL;
+	}
+
+	if (ctx->mode == RTTST_TMBENCH_TASK)
+		rtdm_task_destroy(&ctx->timer_task);
+	else if (ctx->mode == RTTST_TMBENCH_HANDLER)
+		rtdm_timer_destroy(&ctx->timer);
+
+	rtdm_event_destroy(&ctx->result_event);
+
+	ctx->mode = RTTST_TMBENCH_INVALID;
+
+	ctx->result.overall.avg =
+	    slldiv(ctx->result.overall.avg,
+		   ((ctx->result.overall.test_loops) > 1 ?
+		    ctx->result.overall.test_loops : 2) - 1);
+
+	if (rtdm_fd_is_user(fd)) {
+#ifdef CONFIG_XENO_ARCH_SYS3264
+		if (rtdm_fd_is_compat(fd))
+			ret = compat_user_copy_results(ctx, u_res);
+		else
+#endif
+			ret = user_copy_results(ctx, u_res);
+	} else
+		ret = kernel_copy_results(ctx, u_res);
+
+	if (ctx->histogram_size > 0)
+		kfree(ctx->histogram_min);
+
+	up(&ctx->nrt_mutex);
+
+	return ret;
+}
+
+static int rt_tmbench_ioctl_nrt(struct rtdm_fd *fd,
+				unsigned int request, void __user *arg)
+{
+	struct rt_tmbench_context *ctx;
+	int err = 0;
+
+	ctx = rtdm_fd_to_private(fd);
+
+	switch (request) {
+	case RTTST_RTIOC_TMBENCH_START:
+		err = rt_tmbench_start(fd, ctx, arg);
+		break;
+
+	COMPAT_CASE(RTTST_RTIOC_TMBENCH_STOP):
+		err = rt_tmbench_stop(ctx, arg);
+		break;
+	default:
+		err = -ENOSYS;
+	}
+
+	return err;
+}
+
+static int rt_tmbench_ioctl_rt(struct rtdm_fd *fd,
+			       unsigned int request, void __user *arg)
+{
+	struct rt_tmbench_context *ctx;
+	int err = 0;
+
+	ctx = rtdm_fd_to_private(fd);
+
+	switch (request) {
+	case RTTST_RTIOC_INTERM_BENCH_RES:
+		err = rtdm_event_wait(&ctx->result_event);
+		if (err)
+			return err;
+
+		if (rtdm_fd_is_user(fd)) {
+			struct rttst_interm_bench_res __user *user_res = arg;
+
+			err = rtdm_safe_copy_to_user(fd, user_res,
+						     &ctx->result,
+						     sizeof(*user_res));
+		} else {
+			struct rttst_interm_bench_res *res = (void *)arg;
+
+			memcpy(res, &ctx->result, sizeof(*res));
+		}
+
+		break;
+
+	default:
+		err = -ENOSYS;
+	}
+
+	return err;
+}
+
+static struct rtdm_driver timerbench_driver = {
+	.profile_info		= RTDM_PROFILE_INFO(timerbench,
+						    RTDM_CLASS_TESTING,
+						    RTDM_SUBCLASS_TIMERBENCH,
+						    RTTST_PROFILE_VER),
+	.device_flags		= RTDM_NAMED_DEVICE,
+	.device_count		= 1,
+	.context_size		= sizeof(struct rt_tmbench_context),
+	.ops = {
+		.open		= rt_tmbench_open,
+		.close		= rt_tmbench_close,
+		.ioctl_rt	= rt_tmbench_ioctl_rt,
+		.ioctl_nrt	= rt_tmbench_ioctl_nrt,
+	},
+};
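+
+/*
+ * A user-space front-end (e.g. testsuite/latency) typically drives
+ * this driver as follows: open the "timerbench" device, issue
+ * RTTST_RTIOC_TMBENCH_START with a filled-in struct
+ * rttst_tmbench_config, fetch one struct rttst_interm_bench_res per
+ * second of sampling via RTTST_RTIOC_INTERM_BENCH_RES, then collect
+ * the overall results with RTTST_RTIOC_TMBENCH_STOP.
+ */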
+
+static struct rtdm_device device = {
+	.driver = &timerbench_driver,
+	.label = "timerbench",
+};
+
+static int __init __timerbench_init(void)
+{
+	return rtdm_dev_register(&device);
+}
+
+static void __timerbench_exit(void)
+{
+	rtdm_dev_unregister(&device);
+}
+
+module_init(__timerbench_init);
+module_exit(__timerbench_exit);
+++ linux-patched/drivers/xenomai/testing/rtdmtest.c	2022-03-21 12:58:31.384869507 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/drivers/xenomai/testing/Kconfig	1970-01-01 01:00:00.000000000 +0100
+/*
+ * Copyright (C) 2010 Jan Kiszka <jan.kiszka@web.de>.
+ *
+ * Xenomai is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * Xenomai is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Xenomai; if not, write to the Free Software Foundation,
+ * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+#include <linux/module.h>
+#include <rtdm/driver.h>
+#include <rtdm/testing.h>
+
+MODULE_DESCRIPTION("RTDM test helper module");
+MODULE_AUTHOR("Jan Kiszka <jan.kiszka@web.de>");
+MODULE_VERSION("0.1.0");
+MODULE_LICENSE("GPL");
+
+struct rtdm_basic_context {
+	rtdm_timer_t close_timer;
+	unsigned long close_counter;
+	unsigned long close_deferral;
+};
+
+struct rtdm_actor_context {
+	rtdm_task_t actor_task;
+	unsigned int request;
+	rtdm_event_t run;
+	rtdm_event_t done;
+	union {
+		__u32 cpu;
+	} args;
+};
+
+static void close_timer_proc(rtdm_timer_t *timer)
+{
+	struct rtdm_basic_context *ctx =
+		container_of(timer, struct rtdm_basic_context, close_timer);
+
+	if (ctx->close_counter != 1)
+		printk(XENO_ERR
+		       "rtdmtest: %s: close_counter is %lu, should be 1!\n",
+		       __FUNCTION__, ctx->close_counter);
+
+	ctx->close_deferral = RTTST_RTDM_NORMAL_CLOSE;
+	rtdm_fd_unlock(rtdm_private_to_fd(ctx));
+}
+
+static int rtdm_basic_open(struct rtdm_fd *fd, int oflags)
+{
+	struct rtdm_basic_context *ctx = rtdm_fd_to_private(fd);
+
+	rtdm_timer_init(&ctx->close_timer, close_timer_proc,
+			"rtdm close test");
+	ctx->close_counter = 0;
+	ctx->close_deferral = RTTST_RTDM_NORMAL_CLOSE;
+
+	return 0;
+}
+
+static void rtdm_basic_close(struct rtdm_fd *fd)
+{
+	struct rtdm_basic_context *ctx = rtdm_fd_to_private(fd);
+
+	ctx->close_counter++;
+
+	switch (ctx->close_deferral) {
+	case RTTST_RTDM_DEFER_CLOSE_CONTEXT:
+		if (ctx->close_counter != 2) {
+			printk(XENO_ERR
+			       "rtdmtest: %s: close_counter is %lu, "
+			       "should be 2!\n",
+			       __FUNCTION__, ctx->close_counter);
+			return;
+		}
+		rtdm_fd_unlock(fd);
+		break;
+	}
+
+	rtdm_timer_destroy(&ctx->close_timer);
+}
+
+static int rtdm_basic_ioctl_rt(struct rtdm_fd *fd,
+			    unsigned int request, void __user *arg)
+{
+	int ret, magic = RTTST_RTDM_MAGIC_PRIMARY;
+
+	switch (request) {
+	case RTTST_RTIOC_RTDM_PING_PRIMARY:
+		ret = rtdm_safe_copy_to_user(fd, arg, &magic,
+					     sizeof(magic));
+		break;
+	default:
+		ret = -ENOSYS;
+	}
+
+	return ret;
+}
+
+static int rtdm_basic_ioctl_nrt(struct rtdm_fd *fd,
+			    unsigned int request, void __user *arg)
+{
+	struct rtdm_basic_context *ctx = rtdm_fd_to_private(fd);
+	int ret = 0, magic = RTTST_RTDM_MAGIC_SECONDARY;
+
+	switch (request) {
+	case RTTST_RTIOC_RTDM_DEFER_CLOSE:
+		ctx->close_deferral = (unsigned long)arg;
+		if (ctx->close_deferral == RTTST_RTDM_DEFER_CLOSE_CONTEXT) {
+			++ctx->close_counter;
+			rtdm_fd_lock(fd);
+			rtdm_timer_start(&ctx->close_timer, 300000000ULL, 0,
+					RTDM_TIMERMODE_RELATIVE);
+		}
+		break;
+	case RTTST_RTIOC_RTDM_PING_SECONDARY:
+		ret = rtdm_safe_copy_to_user(fd, arg, &magic,
+					     sizeof(magic));
+		break;
+	default:
+		ret = -ENOTTY;
+	}
+
+	return ret;
+}
+
+static void actor_handler(void *arg)
+{
+	struct rtdm_actor_context *ctx = arg;
+	int ret;
+
+	for (;;) {
+		if (rtdm_task_should_stop())
+			return;
+
+		ret = rtdm_event_wait(&ctx->run);
+		if (ret)
+			break;
+
+		switch (ctx->request) {
+		case RTTST_RTIOC_RTDM_ACTOR_GET_CPU:
+			ctx->args.cpu = task_cpu(current);
+			break;
+		default:
+			printk(XENO_ERR "rtdmtest: bad request code %d\n",
+			       ctx->request);
+		}
+
+		rtdm_event_signal(&ctx->done);
+	}
+}
+
+static int rtdm_actor_open(struct rtdm_fd *fd, int oflags)
+{
+	struct rtdm_actor_context *ctx = rtdm_fd_to_private(fd);
+
+	rtdm_event_init(&ctx->run, 0);
+	rtdm_event_init(&ctx->done, 0);
+
+	return rtdm_task_init(&ctx->actor_task, "rtdm_actor",
+			      actor_handler, ctx,
+			      RTDM_TASK_LOWEST_PRIORITY, 0);
+}
+
+static void rtdm_actor_close(struct rtdm_fd *fd)
+{
+	struct rtdm_actor_context *ctx = rtdm_fd_to_private(fd);
+
+	rtdm_task_destroy(&ctx->actor_task);
+	rtdm_event_destroy(&ctx->run);
+	rtdm_event_destroy(&ctx->done);
+}
+
+#define ACTION_TIMEOUT 50000000ULL /* 50 ms timeout on action */
+
+static int run_action(struct rtdm_actor_context *ctx, unsigned int request)
+{
+	rtdm_toseq_t toseq;
+
+	rtdm_toseq_init(&toseq, ACTION_TIMEOUT);
+	ctx->request = request;
+	rtdm_event_signal(&ctx->run);
+	/*
+	 * XXX: The handshake mechanism is not bullet-proof against
+	 * -EINTR received when waiting for the done event. Hopefully
+	 * we won't start a new request while the action task is still
+	 * completing a previous one we abruptly stopped waiting for.
+	 */
+	return rtdm_event_timedwait(&ctx->done, ACTION_TIMEOUT, &toseq);
+}
+
+static int rtdm_actor_ioctl(struct rtdm_fd *fd,
+			    unsigned int request, void __user *arg)
+{
+	struct rtdm_actor_context *ctx = rtdm_fd_to_private(fd);
+	int ret;
+
+	switch (request) {
+	case RTTST_RTIOC_RTDM_ACTOR_GET_CPU:
+		ctx->args.cpu = (__u32)-EINVAL;
+		ret = run_action(ctx, request);
+		if (ret)
+			break;
+		ret = rtdm_safe_copy_to_user(fd, arg, &ctx->args.cpu,
+					     sizeof(ctx->args.cpu));
+		break;
+	default:
+		ret = -ENOTTY;
+	}
+
+	return ret;
+}
+
+static struct rtdm_driver rtdm_basic_driver = {
+	.profile_info		= RTDM_PROFILE_INFO(rtdm_test_basic,
+						    RTDM_CLASS_TESTING,
+						    RTDM_SUBCLASS_RTDMTEST,
+						    RTTST_PROFILE_VER),
+	.device_flags		= RTDM_NAMED_DEVICE | RTDM_EXCLUSIVE,
+	.device_count		= 2,
+	.context_size		= sizeof(struct rtdm_basic_context),
+	.ops = {
+		.open		= rtdm_basic_open,
+		.close		= rtdm_basic_close,
+		.ioctl_rt	= rtdm_basic_ioctl_rt,
+		.ioctl_nrt	= rtdm_basic_ioctl_nrt,
+	},
+};
+
+static struct rtdm_driver rtdm_actor_driver = {
+	.profile_info		= RTDM_PROFILE_INFO(rtdm_test_actor,
+						    RTDM_CLASS_TESTING,
+						    RTDM_SUBCLASS_RTDMTEST,
+						    RTTST_PROFILE_VER),
+	.device_flags		= RTDM_NAMED_DEVICE | RTDM_EXCLUSIVE,
+	.device_count		= 1,
+	.context_size		= sizeof(struct rtdm_actor_context),
+	.ops = {
+		.open		= rtdm_actor_open,
+		.close		= rtdm_actor_close,
+		.ioctl_rt	= rtdm_actor_ioctl,
+	},
+};
+
+static struct rtdm_device device[3] = {
+	[0 ... 1] = {
+		.driver = &rtdm_basic_driver,
+		.label = "rtdm%d",
+	},
+	[2] = {
+		.driver = &rtdm_actor_driver,
+		.label = "rtdmx",
+	}
+};
+
+static int __init rtdm_test_init(void)
+{
+	int i, ret;
+
+	for (i = 0; i < ARRAY_SIZE(device); i++) {
+		ret = rtdm_dev_register(device + i);
+		if (ret)
+			goto fail;
+	}
+
+	return 0;
+fail:
+	while (i-- > 0)
+		rtdm_dev_unregister(device + i);
+
+	return ret;
+}
+
+static void __exit rtdm_test_exit(void)
+{
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(device); i++)
+		rtdm_dev_unregister(device + i);
+}
+
+module_init(rtdm_test_init);
+module_exit(rtdm_test_exit);
+++ linux-patched/drivers/xenomai/testing/Kconfig	2022-03-21 12:58:31.377869576 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/drivers/xenomai/testing/Makefile	1970-01-01 01:00:00.000000000 +0100
+menu "Testing drivers"
+
+config XENO_DRIVERS_TIMERBENCH
+	tristate "Timer benchmark driver"
+	default y
+	help
+	Kernel-based benchmark driver for timer latency evaluation.
+	See testsuite/latency for a possible front-end.
+
+config XENO_DRIVERS_SWITCHTEST
+	tristate "Context switch unit testing driver"
+	default y
+	help
+	Kernel-based driver for unit testing context switches and
+	FPU switches.
+
+config XENO_DRIVERS_HEAPCHECK
+	tristate "Memory allocator test driver"
+	default y
+	help
+	Kernel-based driver for testing Cobalt's memory allocator.
+
+config XENO_DRIVERS_RTDMTEST
+	depends on m
+	tristate "RTDM unit tests driver"
+	help
+	Kernel driver for performing RTDM unit tests.
+
+endmenu
+++ linux-patched/drivers/xenomai/testing/Makefile	2022-03-21 12:58:31.370869644 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/drivers/xenomai/testing/switchtest.c	1970-01-01 01:00:00.000000000 +0100
+
+obj-$(CONFIG_XENO_DRIVERS_TIMERBENCH) += xeno_timerbench.o
+obj-$(CONFIG_XENO_DRIVERS_SWITCHTEST) += xeno_switchtest.o
+obj-$(CONFIG_XENO_DRIVERS_RTDMTEST)   += xeno_rtdmtest.o
+obj-$(CONFIG_XENO_DRIVERS_HEAPCHECK)   += xeno_heapcheck.o
+
+xeno_timerbench-y := timerbench.o
+
+xeno_switchtest-y := switchtest.o
+
+xeno_rtdmtest-y := rtdmtest.o
+
+xeno_heapcheck-y := heapcheck.o
+++ linux-patched/drivers/xenomai/testing/switchtest.c	2022-03-21 12:58:31.362869722 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/drivers/xenomai/testing/heapcheck.c	1970-01-01 01:00:00.000000000 +0100
+/*
+ * Copyright (C) 2010 Gilles Chanteperdrix <gilles.chanteperdrix@xenomai.org>.
+ *
+ * Xenomai is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * Xenomai is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Xenomai; if not, write to the Free Software Foundation,
+ * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+#include <linux/module.h>
+#include <linux/vmalloc.h>
+#include <linux/semaphore.h>
+#include <cobalt/kernel/sched.h>
+#include <cobalt/kernel/synch.h>
+#include <cobalt/kernel/thread.h>
+#include <cobalt/kernel/trace.h>
+#include <rtdm/testing.h>
+#include <rtdm/driver.h>
+#include <asm/xenomai/fptest.h>
+
+MODULE_DESCRIPTION("Cobalt context switch test helper");
+MODULE_AUTHOR("Gilles Chanteperdrix <gilles.chanteperdrix@xenomai.org>");
+MODULE_VERSION("0.1.1");
+MODULE_LICENSE("GPL");
+
+#define RTSWITCH_RT      0x10000
+#define RTSWITCH_NRT     0
+#define RTSWITCH_KERNEL  0x20000
+
+struct rtswitch_task {
+	struct rttst_swtest_task base;
+	rtdm_event_t rt_synch;
+	struct semaphore nrt_synch;
+	struct xnthread ktask;          /* For kernel-space real-time tasks. */
+	unsigned int last_switch;
+};
+
+struct rtswitch_context {
+	struct rtswitch_task *tasks;
+	unsigned int tasks_count;
+	unsigned int next_index;
+	struct semaphore lock;
+	unsigned int cpu;
+	unsigned int switches_count;
+
+	unsigned long pause_us;
+	unsigned int next_task;
+	rtdm_timer_t wake_up_delay;
+
+	unsigned int failed;
+	struct rttst_swtest_error error;
+
+	struct rtswitch_task *utask;
+	rtdm_nrtsig_t wake_utask;
+};
+
+static int fp_features;
+
+static int report(const char *fmt, ...)
+{
+	va_list ap;
+	int ret;
+
+	va_start(ap, fmt);
+	ret = vprintk(fmt, ap);
+	va_end(ap);
+
+	return ret;
+}
+
+static void handle_ktask_error(struct rtswitch_context *ctx, unsigned int fp_val)
+{
+	struct rtswitch_task *cur = &ctx->tasks[ctx->error.last_switch.to];
+	unsigned int i;
+
+	ctx->failed = 1;
+	ctx->error.fp_val = fp_val;
+
+	if ((cur->base.flags & RTSWITCH_RT) == RTSWITCH_RT)
+		for (i = 0; i < ctx->tasks_count; i++) {
+			struct rtswitch_task *task = &ctx->tasks[i];
+
+			/* Find the first non kernel-space task. */
+			if ((task->base.flags & RTSWITCH_KERNEL))
+				continue;
+
+			/* Unblock it. */
+			switch (task->base.flags & RTSWITCH_RT) {
+			case RTSWITCH_NRT:
+				ctx->utask = task;
+				rtdm_nrtsig_pend(&ctx->wake_utask);
+				break;
+
+			case RTSWITCH_RT:
+				rtdm_event_signal(&task->rt_synch);
+				break;
+			}
+
+			xnthread_suspend(&cur->ktask,
+					 XNSUSP, XN_INFINITE, XN_RELATIVE, NULL);
+		}
+}
+
+static int rtswitch_pend_rt(struct rtswitch_context *ctx,
+			    unsigned int idx)
+{
+	struct rtswitch_task *task;
+	int rc;
+
+	if (idx > ctx->tasks_count)
+		return -EINVAL;
+
+	task = &ctx->tasks[idx];
+	task->base.flags |= RTSWITCH_RT;
+
+	rc = rtdm_event_wait(&task->rt_synch);
+	if (rc < 0)
+		return rc;
+
+	if (ctx->failed)
+		return 1;
+
+	return 0;
+}
+
+static void timed_wake_up(rtdm_timer_t *timer)
+{
+	struct rtswitch_context *ctx =
+		container_of(timer, struct rtswitch_context, wake_up_delay);
+	struct rtswitch_task *task;
+
+	task = &ctx->tasks[ctx->next_task];
+
+	switch (task->base.flags & RTSWITCH_RT) {
+	case RTSWITCH_NRT:
+		ctx->utask = task;
+		rtdm_nrtsig_pend(&ctx->wake_utask);
+		break;
+
+	case RTSWITCH_RT:
+		rtdm_event_signal(&task->rt_synch);
+	}
+}
+
+static int rtswitch_to_rt(struct rtswitch_context *ctx,
+			  unsigned int from_idx,
+			  unsigned int to_idx)
+{
+	struct rtswitch_task *from, *to;
+	int rc;
+
+	if (from_idx > ctx->tasks_count || to_idx > ctx->tasks_count)
+		return -EINVAL;
+
+	/* to == from is a special case which means
+	   "return to the previous task". */
+	if (to_idx == from_idx)
+		to_idx = ctx->error.last_switch.from;
+
+	from = &ctx->tasks[from_idx];
+	to = &ctx->tasks[to_idx];
+
+	from->base.flags |= RTSWITCH_RT;
+	from->last_switch = ++ctx->switches_count;
+	ctx->error.last_switch.from = from_idx;
+	ctx->error.last_switch.to = to_idx;
+	barrier();
+
+	if (ctx->pause_us) {
+		ctx->next_task = to_idx;
+		barrier();
+		rtdm_timer_start(&ctx->wake_up_delay,
+				 ctx->pause_us * 1000, 0,
+				 RTDM_TIMERMODE_RELATIVE);
+		xnsched_lock();
+	} else
+		switch (to->base.flags & RTSWITCH_RT) {
+		case RTSWITCH_NRT:
+			ctx->utask = to;
+			barrier();
+			rtdm_nrtsig_pend(&ctx->wake_utask);
+			xnsched_lock();
+			break;
+
+		case RTSWITCH_RT:
+			xnsched_lock();
+			rtdm_event_signal(&to->rt_synch);
+			break;
+
+		default:
+			return -EINVAL;
+		}
+
+	rc = rtdm_event_wait(&from->rt_synch);
+	xnsched_unlock();
+
+	if (rc < 0)
+		return rc;
+
+	if (ctx->failed)
+		return 1;
+
+	return 0;
+}
+
+static int rtswitch_pend_nrt(struct rtswitch_context *ctx,
+			     unsigned int idx)
+{
+	struct rtswitch_task *task;
+
+	if (idx > ctx->tasks_count)
+		return -EINVAL;
+
+	task = &ctx->tasks[idx];
+
+	task->base.flags &= ~RTSWITCH_RT;
+
+	if (down_interruptible(&task->nrt_synch))
+		return -EINTR;
+
+	if (ctx->failed)
+		return 1;
+
+	return 0;
+}
+
+static int rtswitch_to_nrt(struct rtswitch_context *ctx,
+			   unsigned int from_idx,
+			   unsigned int to_idx)
+{
+	struct rtswitch_task *from, *to;
+	unsigned int expected, fp_val;
+	int fp_check;
+
+	if (from_idx > ctx->tasks_count || to_idx > ctx->tasks_count)
+		return -EINVAL;
+
+	/* to == from is a special case which means
+	   "return to the previous task". */
+	if (to_idx == from_idx)
+		to_idx = ctx->error.last_switch.from;
+
+	from = &ctx->tasks[from_idx];
+	to = &ctx->tasks[to_idx];
+
+	fp_check = ctx->switches_count == from->last_switch + 1
+		&& ctx->error.last_switch.from == to_idx
+		&& ctx->error.last_switch.to == from_idx;
+
+	from->base.flags &= ~RTSWITCH_RT;
+	from->last_switch = ++ctx->switches_count;
+	ctx->error.last_switch.from = from_idx;
+	ctx->error.last_switch.to = to_idx;
+	barrier();
+
+	if (ctx->pause_us) {
+		ctx->next_task = to_idx;
+		barrier();
+		rtdm_timer_start(&ctx->wake_up_delay,
+				 ctx->pause_us * 1000, 0,
+				 RTDM_TIMERMODE_RELATIVE);
+	} else
+		switch (to->base.flags & RTSWITCH_RT) {
+		case RTSWITCH_NRT:
+		switch_to_nrt:
+			up(&to->nrt_synch);
+			break;
+
+		case RTSWITCH_RT:
+
+			if (!fp_check || fp_linux_begin() < 0) {
+				fp_check = 0;
+				goto signal_nofp;
+			}
+
+			expected = from_idx + 500 +
+				(ctx->switches_count % 4000000) * 1000;
+
+			fp_regs_set(fp_features, expected);
+			rtdm_event_signal(&to->rt_synch);
+			fp_val = fp_regs_check(fp_features, expected, report);
+			fp_linux_end();
+
+			if (down_interruptible(&from->nrt_synch))
+				return -EINTR;
+			if (ctx->failed)
+				return 1;
+			if (fp_val != expected) {
+				handle_ktask_error(ctx, fp_val);
+				return 1;
+			}
+
+			from->base.flags &= ~RTSWITCH_RT;
+			from->last_switch = ++ctx->switches_count;
+			ctx->error.last_switch.from = from_idx;
+			ctx->error.last_switch.to = to_idx;
+			if ((to->base.flags & RTSWITCH_RT) == RTSWITCH_NRT)
+				goto switch_to_nrt;
+			expected = from_idx + 500 +
+				(ctx->switches_count % 4000000) * 1000;
+			barrier();
+
+			fp_linux_begin();
+			fp_regs_set(fp_features, expected);
+			rtdm_event_signal(&to->rt_synch);
+			fp_val = fp_regs_check(fp_features, expected, report);
+			fp_linux_end();
+
+			if (down_interruptible(&from->nrt_synch))
+				return -EINTR;
+			if (ctx->failed)
+				return 1;
+			if (fp_val != expected) {
+				handle_ktask_error(ctx, fp_val);
+				return 1;
+			}
+
+			from->base.flags &= ~RTSWITCH_RT;
+			from->last_switch = ++ctx->switches_count;
+			ctx->error.last_switch.from = from_idx;
+			ctx->error.last_switch.to = to_idx;
+			barrier();
+			if ((to->base.flags & RTSWITCH_RT) == RTSWITCH_NRT)
+				goto switch_to_nrt;
+
+		signal_nofp:
+			rtdm_event_signal(&to->rt_synch);
+			break;
+
+		default:
+			return -EINVAL;
+		}
+
+	if (down_interruptible(&from->nrt_synch))
+		return -EINTR;
+
+	if (ctx->failed)
+		return 1;
+
+	return 0;
+}
+
+static int rtswitch_set_tasks_count(struct rtswitch_context *ctx, unsigned int count)
+{
+	struct rtswitch_task *tasks;
+
+	if (ctx->tasks_count == count)
+		return 0;
+
+	tasks = vmalloc(count * sizeof(*tasks));
+
+	if (!tasks)
+		return -ENOMEM;
+
+	down(&ctx->lock);
+
+	if (ctx->tasks)
+		vfree(ctx->tasks);
+
+	ctx->tasks = tasks;
+	ctx->tasks_count = count;
+	ctx->next_index = 0;
+
+	up(&ctx->lock);
+
+	return 0;
+}
+
+static int rtswitch_register_task(struct rtswitch_context *ctx,
+				  struct rttst_swtest_task *arg)
+{
+	struct rtswitch_task *t;
+
+	down(&ctx->lock);
+
+	if (ctx->next_index == ctx->tasks_count) {
+		up(&ctx->lock);
+		return -EBUSY;
+	}
+
+	arg->index = ctx->next_index;
+	t = &ctx->tasks[arg->index];
+	ctx->next_index++;
+	t->base = *arg;
+	t->last_switch = 0;
+	sema_init(&t->nrt_synch, 0);
+	rtdm_event_init(&t->rt_synch, 0);
+
+	up(&ctx->lock);
+
+	return 0;
+}
+
+struct taskarg {
+	struct rtswitch_context *ctx;
+	struct rtswitch_task *task;
+};
+
+static void rtswitch_ktask(void *cookie)
+{
+	struct taskarg *arg = (struct taskarg *) cookie;
+	unsigned int fp_val, expected, to, i = 0;
+	struct rtswitch_context *ctx = arg->ctx;
+	struct rtswitch_task *task = arg->task;
+
+	to = task->base.index;
+
+	rtswitch_pend_rt(ctx, task->base.index);
+
+	while (!rtdm_task_should_stop()) {
+		if (task->base.flags & RTTST_SWTEST_USE_FPU)
+			fp_regs_set(fp_features, task->base.index + i * 1000);
+
+		switch (i % 3) {
+		case 0:
+			/* to == from means "return to last task" */
+			rtswitch_to_rt(ctx, task->base.index, task->base.index);
+			break;
+		case 1:
+			if (++to == task->base.index)
+				++to;
+			if (to > ctx->tasks_count - 1)
+				to = 0;
+			if (to == task->base.index)
+				++to;
+
+			fallthrough;
+		case 2:
+			rtswitch_to_rt(ctx, task->base.index, to);
+		}
+
+		if (task->base.flags & RTTST_SWTEST_USE_FPU) {
+			expected = task->base.index + i * 1000;
+			fp_val = fp_regs_check(fp_features, expected, report);
+
+			if (fp_val != expected) {
+				if (task->base.flags & RTTST_SWTEST_FREEZE)
+					xntrace_user_freeze(0, 0);
+				handle_ktask_error(ctx, fp_val);
+			}
+		}
+
+		if (++i == 4000000)
+			i = 0;
+	}
+}
+
+static int rtswitch_create_ktask(struct rtswitch_context *ctx,
+				 struct rttst_swtest_task *ptask)
+{
+	union xnsched_policy_param param;
+	struct xnthread_start_attr sattr;
+	struct xnthread_init_attr iattr;
+	struct rtswitch_task *task;
+	struct taskarg arg;
+	int init_flags;
+	char name[30];
+	int err;
+
+	/*
+	 * Silently disable FP tests in kernel if FPU is not supported
+	 * there. Typical case is math emulation support: we can use
+	 * it from userland as a synthetic FPU, but there is no sane
+	 * way to use it from kernel-based threads (Xenomai or Linux).
+	 */
+	if (!fp_kernel_supported())
+		ptask->flags &= ~RTTST_SWTEST_USE_FPU;
+
+	ptask->flags |= RTSWITCH_KERNEL;
+	err = rtswitch_register_task(ctx, ptask);
+
+	if (err)
+		return err;
+
+	ksformat(name, sizeof(name), "rtk%d/%u", ptask->index, ctx->cpu);
+
+	task = &ctx->tasks[ptask->index];
+
+	arg.ctx = ctx;
+	arg.task = task;
+
+	init_flags = (ptask->flags & RTTST_SWTEST_FPU) ? XNFPU : 0;
+
+	iattr.name = name;
+	iattr.flags = init_flags;
+	iattr.personality = &xenomai_personality;
+	iattr.affinity = *cpumask_of(ctx->cpu);
+	param.rt.prio = 1;
+
+	set_cpus_allowed_ptr(current, cpumask_of(ctx->cpu));
+
+	err = xnthread_init(&task->ktask,
+			    &iattr, &xnsched_class_rt, &param);
+	if (!err) {
+		sattr.mode = 0;
+		sattr.entry = rtswitch_ktask;
+		sattr.cookie = &arg;
+		err = xnthread_start(&task->ktask, &sattr);
+		if (err)
+			__xnthread_discard(&task->ktask);
+	} else
+		/*
+		 * Clear the flags so that we do not later call
+		 * xnthread_cancel() on an invalid thread.
+		 */
+		task->base.flags = 0;
+	/*
+	 * Putting the argument on stack is safe, because the new
+	 * thread, thanks to the above call to set_cpus_allowed_ptr(),
+	 * will preempt the current thread immediately, and will
+	 * suspend only once the arguments on stack are used.
+	 */
+
+	return err;
+}
+
+static void rtswitch_utask_waker(rtdm_nrtsig_t *sig, void *arg)
+{
+	struct rtswitch_context *ctx = (struct rtswitch_context *)arg;
+	up(&ctx->utask->nrt_synch);
+}
+
+static int rtswitch_open(struct rtdm_fd *fd, int oflags)
+{
+	struct rtswitch_context *ctx = rtdm_fd_to_private(fd);
+
+	ctx->tasks = NULL;
+	ctx->tasks_count = ctx->next_index = ctx->cpu = ctx->switches_count = 0;
+	sema_init(&ctx->lock, 1);
+	ctx->failed = 0;
+	ctx->error.last_switch.from = ctx->error.last_switch.to = -1;
+	ctx->pause_us = 0;
+
+	rtdm_nrtsig_init(&ctx->wake_utask, rtswitch_utask_waker, ctx);
+
+	rtdm_timer_init(&ctx->wake_up_delay, timed_wake_up, "switchtest timer");
+
+	return 0;
+}
+
+static void rtswitch_close(struct rtdm_fd *fd)
+{
+	struct rtswitch_context *ctx = rtdm_fd_to_private(fd);
+	unsigned int i;
+
+	rtdm_timer_destroy(&ctx->wake_up_delay);
+	rtdm_nrtsig_destroy(&ctx->wake_utask);
+
+	if (ctx->tasks) {
+		set_cpus_allowed_ptr(current, cpumask_of(ctx->cpu));
+
+		for (i = 0; i < ctx->next_index; i++) {
+			struct rtswitch_task *task = &ctx->tasks[i];
+
+			if (task->base.flags & RTSWITCH_KERNEL) {
+				rtdm_task_destroy(&task->ktask);
+				rtdm_task_join(&task->ktask);
+			}
+			rtdm_event_destroy(&task->rt_synch);
+		}
+		vfree(ctx->tasks);
+	}
+}
+
+static int rtswitch_ioctl_nrt(struct rtdm_fd *fd,
+			      unsigned int request,
+			      void *arg)
+{
+	struct rtswitch_context *ctx = rtdm_fd_to_private(fd);
+	struct rttst_swtest_task task;
+	struct rttst_swtest_dir fromto;
+	__u32 count;
+	int err;
+
+	switch (request) {
+	case RTTST_RTIOC_SWTEST_SET_TASKS_COUNT:
+		return rtswitch_set_tasks_count(ctx,
+						(unsigned long) arg);
+
+	case RTTST_RTIOC_SWTEST_SET_CPU:
+		if ((unsigned long) arg > num_online_cpus() - 1)
+			return -EINVAL;
+
+		ctx->cpu = (unsigned long) arg;
+		return 0;
+
+	case RTTST_RTIOC_SWTEST_SET_PAUSE:
+		ctx->pause_us = (unsigned long) arg;
+		return 0;
+
+	case RTTST_RTIOC_SWTEST_REGISTER_UTASK:
+		if (!rtdm_rw_user_ok(fd, arg, sizeof(task)))
+			return -EFAULT;
+
+		rtdm_copy_from_user(fd, &task, arg, sizeof(task));
+
+		err = rtswitch_register_task(ctx, &task);
+
+		if (!err)
+			rtdm_copy_to_user(fd,
+					  arg,
+					  &task,
+					  sizeof(task));
+
+		return err;
+
+	case RTTST_RTIOC_SWTEST_CREATE_KTASK:
+		if (!rtdm_rw_user_ok(fd, arg, sizeof(task)))
+			return -EFAULT;
+
+		rtdm_copy_from_user(fd, &task, arg, sizeof(task));
+
+		err = rtswitch_create_ktask(ctx, &task);
+
+		if (!err)
+			rtdm_copy_to_user(fd,
+					  arg,
+					  &task,
+					  sizeof(task));
+
+		return err;
+
+	case RTTST_RTIOC_SWTEST_PEND:
+		if (!rtdm_read_user_ok(fd, arg, sizeof(task)))
+			return -EFAULT;
+
+		rtdm_copy_from_user(fd, &task, arg, sizeof(task));
+
+		return rtswitch_pend_nrt(ctx, task.index);
+
+	case RTTST_RTIOC_SWTEST_SWITCH_TO:
+		if (!rtdm_read_user_ok(fd, arg, sizeof(fromto)))
+			return -EFAULT;
+
+		rtdm_copy_from_user(fd,
+				    &fromto,
+				    arg,
+				    sizeof(fromto));
+
+		return rtswitch_to_nrt(ctx, fromto.from, fromto.to);
+
+	case RTTST_RTIOC_SWTEST_GET_SWITCHES_COUNT:
+		if (!rtdm_rw_user_ok(fd, arg, sizeof(count)))
+			return -EFAULT;
+
+		count = ctx->switches_count;
+
+		rtdm_copy_to_user(fd, arg, &count, sizeof(count));
+
+		return 0;
+
+	case RTTST_RTIOC_SWTEST_GET_LAST_ERROR:
+		if (!rtdm_rw_user_ok(fd, arg, sizeof(ctx->error)))
+			return -EFAULT;
+
+		rtdm_copy_to_user(fd,
+				  arg,
+				  &ctx->error,
+				  sizeof(ctx->error));
+
+		return 0;
+
+	default:
+		return -ENOSYS;
+	}
+}
+
+static int rtswitch_ioctl_rt(struct rtdm_fd *fd,
+			     unsigned int request,
+			     void *arg)
+{
+	struct rtswitch_context *ctx = rtdm_fd_to_private(fd);
+	struct rttst_swtest_task task;
+	struct rttst_swtest_dir fromto;
+
+	switch (request) {
+	case RTTST_RTIOC_SWTEST_PEND:
+		if (!rtdm_read_user_ok(fd, arg, sizeof(task)))
+			return -EFAULT;
+
+		rtdm_copy_from_user(fd, &task, arg, sizeof(task));
+
+		return rtswitch_pend_rt(ctx, task.index);
+
+	case RTTST_RTIOC_SWTEST_SWITCH_TO:
+		if (!rtdm_read_user_ok(fd, arg, sizeof(fromto)))
+			return -EFAULT;
+
+		rtdm_copy_from_user(fd,
+				    &fromto,
+				    arg,
+				    sizeof(fromto));
+
+		return rtswitch_to_rt(ctx, fromto.from, fromto.to);
+
+	case RTTST_RTIOC_SWTEST_GET_LAST_ERROR:
+		if (!rtdm_rw_user_ok(fd, arg, sizeof(ctx->error)))
+			return -EFAULT;
+
+		rtdm_copy_to_user(fd,
+				  arg,
+				  &ctx->error,
+				  sizeof(ctx->error));
+
+		return 0;
+
+	default:
+		return -ENOSYS;
+	}
+}
+
+static struct rtdm_driver switchtest_driver = {
+	.profile_info = RTDM_PROFILE_INFO(switchtest,
+					  RTDM_CLASS_TESTING,
+					  RTDM_SUBCLASS_SWITCHTEST,
+					  RTTST_PROFILE_VER),
+	.device_flags = RTDM_NAMED_DEVICE,
+	.device_count =	1,
+	.context_size = sizeof(struct rtswitch_context),
+	.ops = {
+		.open = rtswitch_open,
+		.close = rtswitch_close,
+		.ioctl_rt = rtswitch_ioctl_rt,
+		.ioctl_nrt = rtswitch_ioctl_nrt,
+	},
+};
+
+static struct rtdm_device device = {
+	.driver = &switchtest_driver,
+	.label = "switchtest",
+};
+
+int __init __switchtest_init(void)
+{
+	fp_features = fp_detect();
+
+	return rtdm_dev_register(&device);
+}
+
+void __switchtest_exit(void)
+{
+	rtdm_dev_unregister(&device);
+}
+
+module_init(__switchtest_init);
+module_exit(__switchtest_exit);
+++ linux-patched/drivers/xenomai/testing/heapcheck.c	2022-03-21 12:58:31.355869790 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/drivers/xenomai/Makefile	1970-01-01 01:00:00.000000000 +0100
+/*
+ * Copyright (C) 2018 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * Xenomai is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published
+ * by the Free Software Foundation; either version 2 of the License,
+ * or (at your option) any later version.
+ *
+ * Xenomai is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Xenomai; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA
+ * 02111-1307, USA.
+ */
+#include <linux/module.h>
+#include <linux/vmalloc.h>
+#include <linux/slab.h>
+#include <linux/kernel.h>
+#include <linux/random.h>
+#include <cobalt/kernel/assert.h>
+#include <cobalt/kernel/heap.h>
+#include <rtdm/testing.h>
+#include <rtdm/driver.h>
+
+#define complain(__fmt, __args...)	\
+	printk(XENO_WARNING "heap check: " __fmt "\n", ##__args)
+
+static struct xnheap test_heap = {
+	.name = "test_heap"
+};
+
+enum pattern {
+	alphabet_series,
+	digit_series,
+	binary_series,
+};
+
+struct chunk {
+	void *ptr;
+	enum pattern pattern;
+};
+
+struct runstats {
+	struct rttst_heap_stats stats;
+	struct runstats *next;
+};
+
+static struct runstats *statistics;
+
+static int nrstats;
+
+static inline void breathe(int loops)
+{
+	if ((loops % 1000) == 0)
+		rtdm_task_sleep(300000ULL);
+}
+
+static inline void do_swap(void *left, void *right)
+{
+	char trans[sizeof(struct chunk)];
+
+	memcpy(trans, left, sizeof(struct chunk));
+	memcpy(left, right, sizeof(struct chunk));
+	memcpy(right, trans, sizeof(struct chunk));
+}
+
+static void random_shuffle(void *vbase, size_t nmemb)
+{
+	struct {
+		char x[sizeof(struct chunk)];
+	} __attribute__((packed)) *base = vbase;
+	unsigned int j, k;
+
+	for (j = nmemb; j > 0; j--) {
+		k = (unsigned int)(prandom_u32() % nmemb) + 1;
+		if (j == k)
+			continue;
+		do_swap(&base[j - 1], &base[k - 1]);
+	}
+}
+
+static void fill_pattern(char *p, size_t size, enum pattern pat)
+{
+	unsigned int val, count;
+
+	switch (pat) {
+	case alphabet_series:
+		val = 'a';
+		count = 26;
+		break;
+	case digit_series:
+		val = '0';
+		count = 10;
+		break;
+	default:
+		val = 0;
+		count = 255;
+		break;
+	}
+
+	while (size-- > 0) {
+		*p++ = (char)(val % count);
+		val++;
+	}
+}
+
+static int check_pattern(const char *p, size_t size, enum pattern pat)
+{
+	unsigned int val, count;
+
+	switch (pat) {
+	case alphabet_series:
+		val = 'a';
+		count = 26;
+		break;
+	case digit_series:
+		val = '0';
+		count = 10;
+		break;
+	default:
+		val = 0;
+		count = 255;
+		break;
+	}
+
+	while (size-- > 0) {
+		if (*p++ != (char)(val % count))
+			return 0;
+		val++;
+	}
+
+	return 1;
+}
+
+static size_t find_largest_free(size_t free_size, size_t block_size)
+{
+	void *p;
+
+	for (;;) {
+		p = xnheap_alloc(&test_heap, free_size);
+		if (p) {
+			xnheap_free(&test_heap, p);
+			break;
+		}
+		if (free_size <= block_size)
+			break;
+		free_size -= block_size;
+	}
+
+	return free_size;
+}
+
+static int test_seq(size_t heap_size, size_t block_size, int flags)
+{
+	long alloc_sum_ns, alloc_avg_ns, free_sum_ns, free_avg_ns,
+		alloc_max_ns, free_max_ns, d;
+	size_t user_size, largest_free, maximum_free, freed;
+	int ret, n, k, maxblocks, nrblocks;
+	nanosecs_rel_t start, end;
+	struct chunk *chunks;
+	struct runstats *st;
+	bool done_frag;
+	void *mem, *p;
+
+	maxblocks = heap_size / block_size;
+
+	mem = vmalloc(heap_size);
+	if (mem == NULL)
+		return -ENOMEM;
+
+	ret = xnheap_init(&test_heap, mem, heap_size);
+	if (ret) {
+		complain("cannot init heap with size %zu",
+		       heap_size);
+		goto out;
+	}
+
+	chunks = vmalloc(sizeof(*chunks) * maxblocks);
+	if (chunks == NULL) {
+		ret = -ENOMEM;
+		goto no_chunks;
+	}
+	memset(chunks, 0, sizeof(*chunks) * maxblocks);
+
+	ret = xnthread_harden();
+	if (ret)
+		goto done;
+
+	if (xnheap_get_size(&test_heap) != heap_size) {
+		complain("memory size inconsistency (%zu / %zu bytes)",
+			 heap_size, xnheap_get_size(&test_heap));
+		goto bad;
+	}
+
+	user_size = 0;
+	alloc_avg_ns = 0;
+	free_avg_ns = 0;
+	alloc_max_ns = 0;
+	free_max_ns = 0;
+	maximum_free = 0;
+	largest_free = 0;
+
+	for (n = 0, alloc_sum_ns = 0; ; n++) {
+		start = rtdm_clock_read_monotonic();
+		p = xnheap_alloc(&test_heap, block_size);
+		end = rtdm_clock_read_monotonic();
+		d = end - start;
+		if (d > alloc_max_ns)
+			alloc_max_ns = d;
+		alloc_sum_ns += d;
+		if (p == NULL)
+			break;
+		user_size += block_size;
+		if (n >= maxblocks) {
+			complain("too many blocks fetched"
+			       " (heap=%zu, block=%zu, "
+			       "got more than %d blocks)",
+			       heap_size, block_size, maxblocks);
+			goto bad;
+		}
+		chunks[n].ptr = p;
+		if (flags & RTTST_HEAPCHECK_PATTERN) {
+			chunks[n].pattern = (enum pattern)(prandom_u32() % 3);
+			fill_pattern(chunks[n].ptr, block_size, chunks[n].pattern);
+		}
+		breathe(n);
+	}
+
+	nrblocks = n;
+	if (nrblocks == 0)
+		goto do_stats;
+
+	if ((flags & RTTST_HEAPCHECK_ZEROOVRD) && nrblocks != maxblocks) {
+		complain("too few blocks fetched, unexpected overhead"
+			 " (heap=%zu, block=%zu, "
+			 "got %d, less than %d blocks)",
+			 heap_size, block_size, nrblocks, maxblocks);
+		goto bad;
+	}
+
+	breathe(0);
+
+	/* Make sure we did not trash any busy block while allocating. */
+	if (flags & RTTST_HEAPCHECK_PATTERN) {
+		for (n = 0; n < nrblocks; n++) {
+			if (!check_pattern(chunks[n].ptr, block_size,
+					   chunks[n].pattern)) {
+				complain("corrupted block #%d on alloc"
+					 " sequence (pattern %d)",
+					 n, chunks[n].pattern);
+				goto bad;
+			}
+			breathe(n);
+		}
+	}
+
+	if (flags & RTTST_HEAPCHECK_SHUFFLE)
+		random_shuffle(chunks, nrblocks);
+
+	/*
+	 * Release all blocks.
+	 */
+	for (n = 0, free_sum_ns = 0, freed = 0, done_frag = false;
+	     n < nrblocks; n++) {
+		start = rtdm_clock_read_monotonic();
+		xnheap_free(&test_heap, chunks[n].ptr);
+		end = rtdm_clock_read_monotonic();
+		d = end - start;
+		if (d > free_max_ns)
+			free_max_ns = d;
+		free_sum_ns += d;
+		chunks[n].ptr = NULL;
+		/* Make sure we did not trash busy blocks while freeing. */
+		if (flags & RTTST_HEAPCHECK_PATTERN) {
+			for (k = 0; k < nrblocks; k++) {
+				if (chunks[k].ptr &&
+				    !check_pattern(chunks[k].ptr, block_size,
+						   chunks[k].pattern)) {
+					complain("corrupted block #%d on release"
+						 " sequence (pattern %d)",
+						 k, chunks[k].pattern);
+					goto bad;
+				}
+				breathe(k);
+			}
+		}
+		freed += block_size;
+		/*
+		 * Get a sense of the fragmentation for the tested
+		 * allocation pattern, heap and block sizes when half
+		 * of the usable heap size should be available to us.
+		 * NOTE: user_size excludes the overhead, this is
+		 * NOTE: user_size excludes the overhead; it is what
+		 * we actually managed to obtain from the current
+		 * heap during the allocation loop.
+		if (!done_frag && freed >= user_size / 2) {
+			/* Calculate the external fragmentation. */
+			largest_free = find_largest_free(freed, block_size);
+			maximum_free = freed;
+			done_frag = true;
+		}
+		breathe(n);
+	}
+
+	/*
+	 * If the deallocation mechanism is broken, we might not be
+	 * able to reproduce the same allocation pattern with the same
+	 * outcome; check this.
+	 */
+	if (flags & RTTST_HEAPCHECK_HOT) {
+		for (n = 0, alloc_max_ns = alloc_sum_ns = 0; ; n++) {
+			start = rtdm_clock_read_monotonic();
+			p = xnheap_alloc(&test_heap, block_size);
+			end = rtdm_clock_read_monotonic();
+			d = end - start;
+			if (d > alloc_max_ns)
+				alloc_max_ns = d;
+			alloc_sum_ns += d;
+			if (p == NULL)
+				break;
+			if (n >= maxblocks) {
+				complain("too many blocks fetched during hot pass"
+					 " (heap=%zu, block=%zu, "
+					 "got more than %d blocks)",
+					 heap_size, block_size, maxblocks);
+				goto bad;
+			}
+			chunks[n].ptr = p;
+			breathe(n);
+		}
+		if (n != nrblocks) {
+			complain("inconsistent block count fetched"
+				 " during hot pass (heap=%zu, block=%zu, "
+				 "got %d blocks vs %d during alloc)",
+				 heap_size, block_size, n, nrblocks);
+			goto bad;
+		}
+		for (n = 0, free_max_ns = free_sum_ns = 0; n < nrblocks; n++) {
+			start = rtdm_clock_read_monotonic();
+			xnheap_free(&test_heap, chunks[n].ptr);
+			end = rtdm_clock_read_monotonic();
+			d = end - start;
+			if (d > free_max_ns)
+				free_max_ns = d;
+			free_sum_ns += d;
+			breathe(n);
+		}
+	}
+
+	alloc_avg_ns = alloc_sum_ns / nrblocks;
+	free_avg_ns = free_sum_ns / nrblocks;
+
+	if ((flags & RTTST_HEAPCHECK_ZEROOVRD) && heap_size != user_size) {
+		complain("unexpected overhead reported");
+		goto bad;
+	}
+
+	if (xnheap_get_used(&test_heap) > 0) {
+		complain("memory leakage reported: %zu bytes missing",
+			 xnheap_get_used(&test_heap));
+		goto bad;
+	}
+
+do_stats:
+	xnthread_relax(0, 0);
+	ret = 0;
+	/*
+	 * Don't report stats when running a pattern check, timings
+	 * are affected.
+	 */
+	if (!(flags & RTTST_HEAPCHECK_PATTERN)) {
+		st = kmalloc(sizeof(*st), GFP_KERNEL);
+		if (st == NULL) {
+			complain("failed allocating memory");
+			ret = -ENOMEM;
+			goto out;
+		}
+		st->stats.heap_size = heap_size;
+		st->stats.user_size = user_size;
+		st->stats.block_size = block_size;
+		st->stats.nrblocks = nrblocks;
+		st->stats.alloc_avg_ns = alloc_avg_ns;
+		st->stats.alloc_max_ns = alloc_max_ns;
+		st->stats.free_avg_ns = free_avg_ns;
+		st->stats.free_max_ns = free_max_ns;
+		st->stats.maximum_free = maximum_free;
+		st->stats.largest_free = largest_free;
+		st->stats.flags = flags;
+		st->next = statistics;
+		statistics = st;
+		nrstats++;
+	}
+
+done:
+	vfree(chunks);
+no_chunks:
+	xnheap_destroy(&test_heap);
+out:
+	vfree(mem);
+
+	return ret;
+bad:
+	xnthread_relax(0, 0);
+	ret = -EPROTO;
+	goto done;
+}
+
+static int collect_stats(struct rtdm_fd *fd,
+			 struct rttst_heap_stats __user *buf, int nr)
+{
+	struct runstats *p, *next;
+	int ret, n;
+
+	if (nr < 0)
+		return -EINVAL;
+
+	for (p = statistics, n = nr; p && n > 0 && nrstats > 0;
+	     n--, nrstats--, p = next, buf++) {
+		ret = rtdm_copy_to_user(fd, buf, &p->stats, sizeof(p->stats));
+		if (ret)
+			return ret;
+		next = p->next;
+		statistics = next;
+		kfree(p);
+	}
+
+	return nr - n;
+}
+
+static void heapcheck_close(struct rtdm_fd *fd)
+{
+	struct runstats *p, *next;
+
+	for (p = statistics; p; p = next) {
+		next = p->next;
+		kfree(p);
+	}
+
+	statistics = NULL;
+}
+
+static int heapcheck_ioctl(struct rtdm_fd *fd,
+			   unsigned int request, void __user *arg)
+{
+#ifdef CONFIG_XENO_ARCH_SYS3264
+	struct compat_rttst_heap_stathdr compat_sthdr;
+#endif
+	struct rttst_heap_stathdr sthdr;
+	struct rttst_heap_parms parms;
+	int ret;
+
+	switch (request) {
+	case RTTST_RTIOC_HEAP_CHECK:
+		ret = rtdm_copy_from_user(fd, &parms, arg, sizeof(parms));
+		if (ret)
+			return ret;
+		ret = test_seq(parms.heap_size,
+			       parms.block_size,
+			       parms.flags);
+		if (ret)
+			return ret;
+		parms.nrstats = nrstats;
+		ret = rtdm_copy_to_user(fd, arg, &parms, sizeof(parms));
+		break;
+	case RTTST_RTIOC_HEAP_STAT_COLLECT:
+		sthdr.buf = NULL;
+#ifdef CONFIG_XENO_ARCH_SYS3264
+		if (rtdm_fd_is_compat(fd)) {
+			ret = rtdm_copy_from_user(fd, &compat_sthdr, arg,
+						  sizeof(compat_sthdr));
+			if (ret)
+				return ret;
+
+			ret = collect_stats(fd, compat_ptr(compat_sthdr.buf),
+					    compat_sthdr.nrstats);
+			if (ret < 0)
+				return ret;
+
+			compat_sthdr.nrstats = ret;
+			ret = rtdm_copy_to_user(fd, arg, &compat_sthdr,
+						sizeof(compat_sthdr));
+		} else
+#endif
+		{
+			ret = rtdm_copy_from_user(fd, &sthdr, arg,
+						  sizeof(sthdr));
+			if (ret)
+				return ret;
+
+			ret = collect_stats(fd, sthdr.buf, sthdr.nrstats);
+			if (ret < 0)
+				return ret;
+
+			sthdr.nrstats = ret;
+			ret = rtdm_copy_to_user(fd, arg, &sthdr, sizeof(sthdr));
+		}
+		break;
+	default:
+		ret = -EINVAL;
+	}
+
+	return ret;
+}
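+
+/*
+ * A test front-end typically proceeds in two steps: first
+ * RTTST_RTIOC_HEAP_CHECK runs a check pass described by a struct
+ * rttst_heap_parms and returns the number of statistics records
+ * collected so far in parms.nrstats, then RTTST_RTIOC_HEAP_STAT_COLLECT
+ * drains up to nrstats struct rttst_heap_stats records into the user
+ * buffer referred to by a struct rttst_heap_stathdr.
+ */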
+
+static struct rtdm_driver heapcheck_driver = {
+	.profile_info		= RTDM_PROFILE_INFO(heap_check,
+						    RTDM_CLASS_TESTING,
+						    RTDM_SUBCLASS_HEAPCHECK,
+						    RTTST_PROFILE_VER),
+	.device_flags		= RTDM_NAMED_DEVICE | RTDM_EXCLUSIVE,
+	.device_count		= 1,
+	.ops = {
+		.close		= heapcheck_close,
+		.ioctl_nrt	= heapcheck_ioctl,
+	},
+};
+
+static struct rtdm_device heapcheck_device = {
+	.driver = &heapcheck_driver,
+	.label = "heapcheck",
+};
+
+static int __init heapcheck_init(void)
+{
+	return rtdm_dev_register(&heapcheck_device);
+}
+
+static void __exit heapcheck_exit(void)
+{
+	rtdm_dev_unregister(&heapcheck_device);
+}
+
+module_init(heapcheck_init);
+module_exit(heapcheck_exit);
+
+MODULE_LICENSE("GPL");
+++ linux-patched/drivers/xenomai/Makefile	2022-03-21 12:58:31.347869868 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:1 @
--- linux/drivers/xenomai/gpio/gpio-omap.c	1970-01-01 01:00:00.000000000 +0100
+obj-$(CONFIG_XENOMAI) += autotune/ serial/ testing/ can/ net/ analogy/ ipc/ udd/ gpio/ gpiopwm/ spi/
+++ linux-patched/drivers/xenomai/gpio/gpio-omap.c	2022-03-21 12:58:31.340869936 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/drivers/xenomai/gpio/gpio-sun8i-h3.c	1970-01-01 01:00:00.000000000 +0100
+/**
+ * @note Copyright (C) 2020 Greg Gallagher <greg@embeddedgreg.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+#include <linux/module.h>
+#include <rtdm/gpio.h>
+
+#define RTDM_SUBCLASS_OMAP  6
+
+static const char *compat_array[] = {
+	"ti,omap4-gpio",
+	"ti,omap3-gpio",
+	"ti,omap2-gpio",
+};
+
+static int __init omap_gpio_init(void)
+{
+	return rtdm_gpiochip_scan_array_of(NULL, compat_array,
+					   ARRAY_SIZE(compat_array),
+					   RTDM_SUBCLASS_OMAP);
+}
+module_init(omap_gpio_init);
+
+static void __exit omap_gpio_exit(void)
+{
+	rtdm_gpiochip_remove_by_type(RTDM_SUBCLASS_OMAP);
+}
+module_exit(omap_gpio_exit);
+
+MODULE_LICENSE("GPL");
+++ linux-patched/drivers/xenomai/gpio/gpio-sun8i-h3.c	2022-03-21 12:58:31.333870005 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/drivers/xenomai/gpio/gpio-bcm2835.c	1970-01-01 01:00:00.000000000 +0100
+/**
+ * Copyright (C) 2017 Philippe Gerum <rpm@xenomai.org>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+#include <linux/module.h>
+#include <rtdm/gpio.h>
+
+#define RTDM_SUBCLASS_H3  3
+
+static int __init h3_gpio_init(void)
+{
+	int ret;
+	
+	ret = rtdm_gpiochip_scan_of(NULL, "allwinner,sun8i-h3-pinctrl",
+				    RTDM_SUBCLASS_H3);
+	if (ret)
+		return ret;
+
+	return rtdm_gpiochip_scan_of(NULL, "allwinner,sun8i-h3-r-pinctrl",
+				     RTDM_SUBCLASS_H3);
+}
+module_init(h3_gpio_init);
+
+static void __exit h3_gpio_exit(void)
+{
+	rtdm_gpiochip_remove_by_type(RTDM_SUBCLASS_H3);
+}
+module_exit(h3_gpio_exit);
+
+MODULE_LICENSE("GPL");
+++ linux-patched/drivers/xenomai/gpio/gpio-bcm2835.c	2022-03-21 12:58:31.325870083 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/drivers/xenomai/gpio/Kconfig	1970-01-01 01:00:00.000000000 +0100
+/**
+ * @note Copyright (C) 2016 Philippe Gerum <rpm@xenomai.org>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+#include <linux/module.h>
+#include <rtdm/gpio.h>
+
+#define RTDM_SUBCLASS_BCM2835  1
+
+static int __init bcm2835_gpio_init(void)
+{
+	return rtdm_gpiochip_scan_of(NULL, "brcm,bcm2835-gpio",
+				     RTDM_SUBCLASS_BCM2835);
+}
+module_init(bcm2835_gpio_init);
+
+static void __exit bcm2835_gpio_exit(void)
+{
+	rtdm_gpiochip_remove_by_type(RTDM_SUBCLASS_BCM2835);
+}
+module_exit(bcm2835_gpio_exit);
+
+MODULE_LICENSE("GPL");
+
+++ linux-patched/drivers/xenomai/gpio/Kconfig	2022-03-21 12:58:31.318870151 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/drivers/xenomai/gpio/gpio-mxc.c	1970-01-01 01:00:00.000000000 +0100
+menu "Real-time GPIO drivers"
+
+config XENO_DRIVERS_GPIO
+       bool "GPIO controller"
+       depends on GPIOLIB
+       help
+
+       Real-time capable GPIO module.
+
+if XENO_DRIVERS_GPIO
+
+config XENO_DRIVERS_GPIO_BCM2835
+	depends on MACH_BCM2708 || ARCH_BCM2835
+	tristate "Support for BCM2835 GPIOs"
+	help
+
+	Enables support for the GPIO controller available from
+	Broadcom's BCM2835 SoC.
+
+config XENO_DRIVERS_GPIO_MXC
+	depends on GPIO_MXC
+	tristate "Support for MXC GPIOs"
+	help
+
+	Suitable for the GPIO controller available from
+	Freescale/NXP's MXC architecture.
+
+config XENO_DRIVERS_GPIO_SUN8I_H3
+	depends on MACH_SUN8I && PINCTRL_SUN8I_H3
+	tristate "Support for SUN8I H3 GPIOs"
+	help
+
+	Suitable for the GPIO controller available from Allwinner's H3
+	SoC, as found on the NanoPI boards.
+
+config XENO_DRIVERS_GPIO_ZYNQ7000
+	depends on ARCH_ZYNQ || ARCH_ZYNQMP
+	tristate "Support for Zynq7000 GPIOs"
+	help
+
+	Enables support for the GPIO controller available from
+	Xilinx's Zynq7000 SoC.
+
+config XENO_DRIVERS_GPIO_XILINX
+	depends on ARCH_ZYNQ || ARCH_ZYNQMP
+	tristate "Support for Xilinx GPIOs"
+	help
+
+	Enables support for the GPIO controller available from
+	Xilinx's softcore IP.
+
+config XENO_DRIVERS_GPIO_OMAP
+	depends on ARCH_OMAP2PLUS || ARCH_OMAP
+	tristate "Support for OMAP GPIOs"
+	help
+
+	Enables support for the GPIO controller available from
+	the OMAP family of SoCs.
+
+config XENO_DRIVERS_GPIO_CHERRYVIEW
+	depends on PINCTRL_CHERRYVIEW
+	tristate "Support for Cherryview GPIOs"
+	help
+
+	Enables support for the Intel Cherryview GPIO controller.
+
+config XENO_DRIVERS_GPIO_DEBUG
+       bool "Enable GPIO core debugging features"
+
+endif
+
+endmenu
+++ linux-patched/drivers/xenomai/gpio/gpio-mxc.c	2022-03-21 12:58:31.310870229 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/drivers/xenomai/gpio/gpio-core.c	1970-01-01 01:00:00.000000000 +0100
+/**
+ * @note Copyright (C) 2016 Philippe Gerum <rpm@xenomai.org>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+#include <linux/module.h>
+#include <rtdm/gpio.h>
+
+#define RTDM_SUBCLASS_MXC  2
+
+static const char *compat_array[] = {
+	"fsl,imx6q-gpio",
+	"fsl,imx7d-gpio",
+};
+
+static int __init mxc_gpio_init(void)
+{
+	return rtdm_gpiochip_scan_array_of(NULL, compat_array,
+					   ARRAY_SIZE(compat_array),
+					   RTDM_SUBCLASS_MXC);
+}
+module_init(mxc_gpio_init);
+
+static void __exit mxc_gpio_exit(void)
+{
+	rtdm_gpiochip_remove_by_type(RTDM_SUBCLASS_MXC);
+}
+module_exit(mxc_gpio_exit);
+
+MODULE_LICENSE("GPL");
+++ linux-patched/drivers/xenomai/gpio/gpio-core.c	2022-03-21 12:58:31.303870297 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/drivers/xenomai/gpio/gpio-zynq7000.c	1970-01-01 01:00:00.000000000 +0100
+/**
+ * @note Copyright (C) 2016 Philippe Gerum <rpm@xenomai.org>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.	 See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/device.h>
+#include <linux/gpio.h>
+#include <linux/irq.h>
+#include <linux/slab.h>
+#include <linux/err.h>
+#include <rtdm/gpio.h>
+
+struct rtdm_gpio_chan {
+	int requested : 1,
+		has_direction : 1,
+		is_output : 1,
+	        is_interrupt : 1,
+		want_timestamp : 1;
+};
+
+static LIST_HEAD(rtdm_gpio_chips);
+
+static DEFINE_MUTEX(chip_lock);
+
+static int gpio_pin_interrupt(rtdm_irq_t *irqh)
+{
+	struct rtdm_gpio_pin *pin;
+
+	pin = rtdm_irq_get_arg(irqh, struct rtdm_gpio_pin);
+
+	if (pin->monotonic_timestamp)
+		pin->timestamp = rtdm_clock_read_monotonic();
+	else
+		pin->timestamp = rtdm_clock_read();
+	rtdm_event_signal(&pin->event);
+
+	return RTDM_IRQ_HANDLED;
+}
+
+static int request_gpio_irq(unsigned int gpio, struct rtdm_gpio_pin *pin,
+			    struct rtdm_gpio_chan *chan,
+			    int trigger)
+{
+	int ret, irq_trigger, irq;
+
+	if (trigger & ~GPIO_TRIGGER_MASK)
+		return -EINVAL;
+
+	if (!chan->requested) {
+		ret = gpio_request(gpio, pin->name);
+		if (ret) {
+			if (ret != -EPROBE_DEFER)
+				printk(XENO_ERR
+				       "cannot request GPIO%d\n", gpio);
+			return ret;
+		}
+		chan->requested = true;
+	}
+
+	ret = gpio_direction_input(gpio);
+	if (ret) {
+		printk(XENO_ERR "cannot set GPIO%d as input\n", gpio);
+		goto fail;
+	}
+
+	chan->has_direction = true;
+	gpio_export(gpio, true);
+
+	rtdm_event_clear(&pin->event);
+
+	/*
+	 * Attempt to hook the interrupt associated to that pin. We
+	 * might fail getting a valid IRQ number, in case the GPIO
+	 * chip did not define any mapping handler (->to_irq). If so,
+	 * just assume that either we have no IRQ indeed, or interrupt
+	 * handling may be open coded elsewhere.
+	 */
+	irq = gpio_to_irq(gpio);
+	if (irq < 0)
+		goto done;
+
+	irq_trigger = 0;
+	if (trigger & GPIO_TRIGGER_EDGE_RISING)
+		irq_trigger |= IRQ_TYPE_EDGE_RISING;
+	if (trigger & GPIO_TRIGGER_EDGE_FALLING)
+		irq_trigger |= IRQ_TYPE_EDGE_FALLING;
+	if (trigger & GPIO_TRIGGER_LEVEL_HIGH)
+		irq_trigger |= IRQ_TYPE_LEVEL_HIGH;
+	if (trigger & GPIO_TRIGGER_LEVEL_LOW)
+		irq_trigger |= IRQ_TYPE_LEVEL_LOW;
+
+	if (irq_trigger)
+		irq_set_irq_type(irq, irq_trigger);
+	
+	ret = rtdm_irq_request(&pin->irqh, irq, gpio_pin_interrupt,
+			       0, pin->name, pin);
+	if (ret) {
+		printk(XENO_ERR "cannot request GPIO%d interrupt\n", gpio);
+		goto fail;
+	}
+
+	rtdm_irq_enable(&pin->irqh);
+done:
+	chan->is_interrupt = true;
+
+	return 0;
+fail:
+	gpio_free(gpio);
+	chan->requested = false;
+
+	return ret;
+}
+
+static void release_gpio_irq(unsigned int gpio, struct rtdm_gpio_pin *pin,
+			     struct rtdm_gpio_chan *chan)
+{
+	if (chan->is_interrupt) {
+		rtdm_irq_free(&pin->irqh);
+		chan->is_interrupt = false;
+	}
+	gpio_free(gpio);
+	chan->requested = false;
+}
+
+static int gpio_pin_ioctl_nrt(struct rtdm_fd *fd,
+			      unsigned int request, void *arg)
+{
+	struct rtdm_gpio_chan *chan = rtdm_fd_to_private(fd);
+	struct rtdm_device *dev = rtdm_fd_device(fd);
+	unsigned int gpio = rtdm_fd_minor(fd);
+	int ret = 0, val, trigger;
+	struct rtdm_gpio_pin *pin;
+	
+	pin = container_of(dev, struct rtdm_gpio_pin, dev);
+
+	switch (request) {
+	case GPIO_RTIOC_DIR_OUT:
+		ret = rtdm_safe_copy_from_user(fd, &val, arg, sizeof(val));
+		if (ret)
+			return ret;
+		ret = gpio_direction_output(gpio, val);
+		if (ret == 0) {
+			chan->has_direction = true;
+			chan->is_output = true;
+		}
+		break;
+	case GPIO_RTIOC_DIR_IN:
+		ret = gpio_direction_input(gpio);
+		if (ret == 0)
+			chan->has_direction = true;
+		break;
+	case GPIO_RTIOC_IRQEN:
+		if (chan->is_interrupt) {
+			return -EBUSY;
+		}
+		ret = rtdm_safe_copy_from_user(fd, &trigger,
+					       arg, sizeof(trigger));
+		if (ret)
+			return ret;
+		ret = request_gpio_irq(gpio, pin, chan, trigger);
+		break;
+	case GPIO_RTIOC_IRQDIS:
+		if (chan->is_interrupt) {
+			release_gpio_irq(gpio, pin, chan);
+			chan->requested = false;
+			chan->is_interrupt = false;
+		}
+		break;
+	case GPIO_RTIOC_REQS:
+		ret = gpio_request(gpio, pin->name);
+		if (ret)
+			return ret;
+		else
+			chan->requested = true;
+		break;
+	case GPIO_RTIOC_RELS:
+		gpio_free(gpio);
+		chan->requested = false;
+		break;
+	case GPIO_RTIOC_TS_MONO:
+	case GPIO_RTIOC_TS_REAL:
+		ret = rtdm_safe_copy_from_user(fd, &val, arg, sizeof(val));
+		if (ret)
+			return ret;
+		chan->want_timestamp = !!val;
+		pin->monotonic_timestamp = request == GPIO_RTIOC_TS_MONO;
+		break;
+	default:
+		return -EINVAL;
+	}
+	
+	return ret;
+}
+
+static ssize_t gpio_pin_read_rt(struct rtdm_fd *fd,
+				void __user *buf, size_t len)
+{
+	struct rtdm_gpio_chan *chan = rtdm_fd_to_private(fd);
+	struct rtdm_device *dev = rtdm_fd_device(fd);
+	struct rtdm_gpio_readout rdo;
+	struct rtdm_gpio_pin *pin;
+	int ret;
+
+	if (!chan->has_direction)
+		return -EAGAIN;
+
+	if (chan->is_output)
+		return -EINVAL;
+
+	pin = container_of(dev, struct rtdm_gpio_pin, dev);
+
+	if (chan->want_timestamp) {
+		if (len < sizeof(rdo))
+			return -EINVAL;
+
+		if (!(fd->oflags & O_NONBLOCK)) {
+			ret = rtdm_event_wait(&pin->event);
+			if (ret)
+				return ret;
+			rdo.timestamp = pin->timestamp;
+		} else if (pin->monotonic_timestamp) {
+			rdo.timestamp = rtdm_clock_read_monotonic();
+		} else {
+			rdo.timestamp = rtdm_clock_read();
+		}
+
+		len = sizeof(rdo);
+		rdo.value = gpiod_get_raw_value(pin->desc);
+		ret = rtdm_safe_copy_to_user(fd, buf, &rdo, len);
+	} else {
+		if (len < sizeof(rdo.value))
+			return -EINVAL;
+
+		if (!(fd->oflags & O_NONBLOCK)) {
+			ret = rtdm_event_wait(&pin->event);
+			if (ret)
+				return ret;
+		}
+
+		len = sizeof(rdo.value);
+		rdo.value = gpiod_get_raw_value(pin->desc);
+		ret = rtdm_safe_copy_to_user(fd, buf, &rdo.value, len);
+	}
+	
+	return ret ?: len;
+}
+
+static ssize_t gpio_pin_write_rt(struct rtdm_fd *fd,
+				 const void __user *buf, size_t len)
+{
+	struct rtdm_gpio_chan *chan = rtdm_fd_to_private(fd);
+	struct rtdm_device *dev = rtdm_fd_device(fd);
+	struct rtdm_gpio_pin *pin;
+	int value, ret;
+
+	if (len < sizeof(value))
+		return -EINVAL;
+
+	if (!chan->has_direction)
+		return -EAGAIN;
+
+	if (!chan->is_output)
+		return -EINVAL;
+
+	ret = rtdm_safe_copy_from_user(fd, &value, buf, sizeof(value));
+	if (ret)
+		return ret;
+
+	pin = container_of(dev, struct rtdm_gpio_pin, dev);
+	gpiod_set_raw_value(pin->desc, value);
+
+	return sizeof(value);
+}
+
+static int gpio_pin_select(struct rtdm_fd *fd, struct xnselector *selector,
+			   unsigned int type, unsigned int index)
+{
+	struct rtdm_gpio_chan *chan = rtdm_fd_to_private(fd);
+	struct rtdm_device *dev = rtdm_fd_device(fd);
+	struct rtdm_gpio_pin *pin;
+
+	if (!chan->has_direction)
+		return -EAGAIN;
+
+	if (chan->is_output)
+		return -EINVAL;
+
+	pin = container_of(dev, struct rtdm_gpio_pin, dev);
+
+	return rtdm_event_select(&pin->event, selector, type, index);
+}
+
+int gpio_pin_open(struct rtdm_fd *fd, int oflags)
+{
+	struct rtdm_gpio_chan *chan = rtdm_fd_to_private(fd);
+	struct rtdm_device *dev = rtdm_fd_device(fd);
+	unsigned int gpio = rtdm_fd_minor(fd);
+	int ret = 0;
+	struct rtdm_gpio_pin *pin;
+
+	pin = container_of(dev, struct rtdm_gpio_pin, dev);
+	ret = gpio_request(gpio, pin->name);
+	if (ret) {
+		printk(XENO_ERR "failed to request pin %d : %d\n", gpio, ret);
+		return ret;
+	} else {
+		chan->requested = true;
+	}
+
+	return 0;
+}
+
+static void gpio_pin_close(struct rtdm_fd *fd)
+{
+	struct rtdm_gpio_chan *chan = rtdm_fd_to_private(fd);
+	struct rtdm_device *dev = rtdm_fd_device(fd);
+	unsigned int gpio = rtdm_fd_minor(fd);
+	struct rtdm_gpio_pin *pin;
+
+	if (chan->requested) {
+		pin = container_of(dev, struct rtdm_gpio_pin, dev);
+		release_gpio_irq(gpio, pin, chan);
+	}
+}
+
+static void delete_pin_devices(struct rtdm_gpio_chip *rgc)
+{
+	struct rtdm_gpio_pin *pin;
+	struct rtdm_device *dev;
+	int offset;
+
+	for (offset = 0; offset < rgc->gc->ngpio; offset++) {
+		pin = rgc->pins + offset;
+		dev = &pin->dev;
+		rtdm_dev_unregister(dev);
+		rtdm_event_destroy(&pin->event);
+		kfree(dev->label);
+		kfree(pin->name);
+	}
+}
+
+static int create_pin_devices(struct rtdm_gpio_chip *rgc)
+{
+	struct gpio_chip *gc = rgc->gc;
+	struct rtdm_gpio_pin *pin;
+	struct rtdm_device *dev;
+	int offset, ret, gpio;
+
+	for (offset = 0; offset < gc->ngpio; offset++) {
+		ret = -ENOMEM;
+		gpio = gc->base + offset;
+		pin = rgc->pins + offset;
+		pin->name = kasprintf(GFP_KERNEL, "gpio%d", gpio);
+		if (pin->name == NULL)
+			goto fail_name;
+		pin->desc = gpio_to_desc(gpio);
+		if (pin->desc == NULL) {
+			ret = -ENODEV;
+			goto fail_desc;
+		}
+		dev = &pin->dev;
+		dev->driver = &rgc->driver;
+		dev->label = kasprintf(GFP_KERNEL, "%s/gpio%%d", gc->label);
+		if (dev->label == NULL)
+			goto fail_label;
+		dev->minor = gpio;
+		dev->device_data = rgc;
+		ret = rtdm_dev_register(dev);
+		if (ret)
+			goto fail_register;
+		rtdm_event_init(&pin->event, 0);
+	}
+
+	return 0;
+
+fail_register:
+	kfree(dev->label);
+fail_desc:
+fail_label:
+	kfree(pin->name);
+fail_name:
+	delete_pin_devices(rgc);
+
+	return ret;
+}
+
+static char *gpio_pin_devnode(struct device *dev, umode_t *mode)
+{
+	return kasprintf(GFP_KERNEL, "rtdm/%s/%s",
+			 dev->class->name,
+			 dev_name(dev));
+}
+
+int rtdm_gpiochip_add(struct rtdm_gpio_chip *rgc,
+		      struct gpio_chip *gc, int gpio_subclass)
+{
+	int ret;
+
+	rgc->devclass = class_create(gc->owner, gc->label);
+	if (IS_ERR(rgc->devclass)) {
+		printk(XENO_ERR "cannot create sysfs class\n");
+		return PTR_ERR(rgc->devclass);
+	}
+	rgc->devclass->devnode = gpio_pin_devnode;
+
+	rgc->driver.profile_info = (struct rtdm_profile_info)
+		RTDM_PROFILE_INFO(rtdm_gpio_chip,
+				  RTDM_CLASS_GPIO,
+				  gpio_subclass,
+				  0);
+	rgc->driver.device_flags = RTDM_NAMED_DEVICE|RTDM_FIXED_MINOR;
+	rgc->driver.base_minor = gc->base;
+	rgc->driver.device_count = gc->ngpio;
+	rgc->driver.context_size = sizeof(struct rtdm_gpio_chan);
+	rgc->driver.ops = (struct rtdm_fd_ops){
+		.open		=	gpio_pin_open,
+		.close		=	gpio_pin_close,
+		.ioctl_nrt	=	gpio_pin_ioctl_nrt,
+		.read_rt	=	gpio_pin_read_rt,
+		.write_rt	=	gpio_pin_write_rt,
+		.select		=	gpio_pin_select,
+	};
+	
+	rtdm_drv_set_sysclass(&rgc->driver, rgc->devclass);
+
+	rgc->gc = gc;
+	rtdm_lock_init(&rgc->lock);
+
+	ret = create_pin_devices(rgc);
+	if (ret)
+		class_destroy(rgc->devclass);
+	
+	return ret;
+}
+EXPORT_SYMBOL_GPL(rtdm_gpiochip_add);
+
+struct rtdm_gpio_chip *
+rtdm_gpiochip_alloc(struct gpio_chip *gc, int gpio_subclass)
+{
+	struct rtdm_gpio_chip *rgc;
+	size_t asize;
+	int ret;
+
+	if (gc->ngpio == 0)
+		return ERR_PTR(-EINVAL);
+
+	asize = sizeof(*rgc) + gc->ngpio * sizeof(struct rtdm_gpio_pin);
+	rgc = kzalloc(asize, GFP_KERNEL);
+	if (rgc == NULL)
+		return ERR_PTR(-ENOMEM);
+
+	ret = rtdm_gpiochip_add(rgc, gc, gpio_subclass);
+	if (ret) {
+		kfree(rgc);
+		return ERR_PTR(ret);
+	}
+
+	mutex_lock(&chip_lock);
+	list_add(&rgc->next, &rtdm_gpio_chips);
+	mutex_unlock(&chip_lock);
+
+	return rgc;
+}
+EXPORT_SYMBOL_GPL(rtdm_gpiochip_alloc);
+
+void rtdm_gpiochip_remove(struct rtdm_gpio_chip *rgc)
+{
+	mutex_lock(&chip_lock);
+	list_del(&rgc->next);
+	mutex_unlock(&chip_lock);
+	delete_pin_devices(rgc);
+	class_destroy(rgc->devclass);
+}
+EXPORT_SYMBOL_GPL(rtdm_gpiochip_remove);
+
+int rtdm_gpiochip_post_event(struct rtdm_gpio_chip *rgc,
+			     unsigned int offset)
+{
+	struct rtdm_gpio_pin *pin;
+
+	if (offset >= rgc->gc->ngpio)
+		return -EINVAL;
+
+	pin = rgc->pins + offset;
+	if (pin->monotonic_timestamp)
+		pin->timestamp = rtdm_clock_read_monotonic();
+	else
+		pin->timestamp = rtdm_clock_read();
+	rtdm_event_signal(&pin->event);
+	
+	return 0;
+}
+EXPORT_SYMBOL_GPL(rtdm_gpiochip_post_event);
+
+static int gpiochip_match_name(struct gpio_chip *chip, void *data)
+{
+	const char *name = data;
+
+	return !strcmp(chip->label, name);
+}
+
+static struct gpio_chip *find_chip_by_name(const char *name)
+{
+	return gpiochip_find((void *)name, gpiochip_match_name);
+}
+
+int rtdm_gpiochip_add_by_name(struct rtdm_gpio_chip *rgc,
+			      const char *label, int gpio_subclass)
+{
+	struct gpio_chip *gc = find_chip_by_name(label);
+
+	if (gc == NULL)
+		return -EPROBE_DEFER;
+
+	return rtdm_gpiochip_add(rgc, gc, gpio_subclass);
+}
+EXPORT_SYMBOL_GPL(rtdm_gpiochip_add_by_name);
+
+int rtdm_gpiochip_find(struct device_node *from, const char *label, int type)
+{
+	struct rtdm_gpio_chip *rgc;
+	struct gpio_chip *chip;
+	int ret = -ENODEV;
+
+	if (!rtdm_available())
+		return -ENOSYS;
+
+	chip = find_chip_by_name(label);
+	if (chip == NULL)
+		return ret;
+
+	ret = 0;
+	rgc = rtdm_gpiochip_alloc(chip, type);
+	if (IS_ERR(rgc))
+		ret = PTR_ERR(rgc);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(rtdm_gpiochip_find);
+
+int rtdm_gpiochip_array_find(struct device_node *from, const char *label[],
+			     int nentries, int type)
+{
+	int ret = -ENODEV, _ret, n;
+
+	for (n = 0; n < nentries; n++) {
+		_ret = rtdm_gpiochip_find(from, label[n], type);
+		if (_ret) {
+			if (_ret != -ENODEV)
+				return _ret;
+		} else
+			ret = 0;
+	}
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(rtdm_gpiochip_array_find);
+
+#ifdef CONFIG_OF
+
+#include <linux/of_platform.h>
+
+struct gpiochip_holder {
+	struct gpio_chip *chip;
+	struct list_head next;
+};
+	
+struct gpiochip_match_data {
+	struct device *parent;
+	struct list_head list;
+};
+
+static int match_gpio_chip(struct gpio_chip *gc, void *data)
+{
+	struct gpiochip_match_data *d = data;
+	struct gpiochip_holder *h;
+
+	if (cobalt_gpiochip_dev(gc) == d->parent) {
+		h = kmalloc(sizeof(*h), GFP_KERNEL);
+		if (h) {
+			h->chip = gc;
+			list_add(&h->next, &d->list);
+		}
+	}
+
+	/*
+	 * Iterate over all existing GPIO chips, we may have several
+	 * hosted by the same pin controller mapping different ranges.
+	 */
+	return 0;
+}
+
+int rtdm_gpiochip_scan_of(struct device_node *from, const char *compat,
+			  int type)
+{
+	struct gpiochip_match_data match;
+	struct gpiochip_holder *h, *n;
+	struct device_node *np = from;
+	struct platform_device *pdev;
+	struct rtdm_gpio_chip *rgc;
+	int ret = -ENODEV, _ret;
+
+	if (!rtdm_available())
+		return -ENOSYS;
+
+	for (;;) {
+		np = of_find_compatible_node(np, NULL, compat);
+		if (np == NULL)
+			break;
+		pdev = of_find_device_by_node(np);
+		of_node_put(np);
+		if (pdev == NULL)
+			break;
+		match.parent = &pdev->dev;
+		INIT_LIST_HEAD(&match.list);
+		gpiochip_find(&match, match_gpio_chip);
+		if (!list_empty(&match.list)) {
+			ret = 0;
+			list_for_each_entry_safe(h, n, &match.list, next) {
+				list_del(&h->next);
+				_ret = 0;
+				rgc = rtdm_gpiochip_alloc(h->chip, type);
+				if (IS_ERR(rgc))
+					_ret = PTR_ERR(rgc);
+				kfree(h);
+				if (_ret && !ret)
+					ret = _ret;
+			}
+			if (ret)
+				break;
+		}
+	}
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(rtdm_gpiochip_scan_of);
+
+int rtdm_gpiochip_scan_array_of(struct device_node *from,
+				const char *compat[],
+				int nentries, int type)
+{
+	int ret = -ENODEV, _ret, n;
+
+	for (n = 0; n < nentries; n++) {
+		_ret = rtdm_gpiochip_scan_of(from, compat[n], type);
+		if (_ret) {
+			if (_ret != -ENODEV)
+				return _ret;
+		} else
+			ret = 0;
+	}
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(rtdm_gpiochip_scan_array_of);
+
+#endif /* CONFIG_OF */
+
+void rtdm_gpiochip_remove_by_type(int type)
+{
+	struct rtdm_gpio_chip *rgc, *n;
+
+	mutex_lock(&chip_lock);
+
+	list_for_each_entry_safe(rgc, n, &rtdm_gpio_chips, next) {
+		if (rgc->driver.profile_info.subclass_id == type) {
+			mutex_unlock(&chip_lock);
+			rtdm_gpiochip_remove(rgc);
+			kfree(rgc);
+			mutex_lock(&chip_lock);
+		}
+	}
+
+	mutex_unlock(&chip_lock);
+}
+EXPORT_SYMBOL_GPL(rtdm_gpiochip_remove_by_type);
+++ linux-patched/drivers/xenomai/gpio/gpio-zynq7000.c	2022-03-21 12:58:31.293870395 +0100
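Editor's note: gpio-core.c above is the piece that turns each pin of a registered gpio_chip into an RTDM named device, exposing direction, request/release, interrupt hook-up and timestamping through the GPIO_RTIOC_* ioctls and blocking reads. The following is a minimal user-space sketch only; the device path, pin number and userland header location are assumptions (they depend on the gpio_chip label and the Xenomai installation on the target), and the program is expected to be built against libcobalt (e.g. via xeno-config) so that open/ioctl/read reach the RTDM driver rather than the regular character device layer.

/* Sketch: wait for a rising edge on one pin and print its timestamp. */
#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <rtdm/gpio.h>	/* assumed userland header: GPIO_RTIOC_*, struct rtdm_gpio_readout */

int main(void)
{
	struct rtdm_gpio_readout rdo;
	int trigger = GPIO_TRIGGER_EDGE_RISING;
	int enable_ts = 1, fd;

	/* assumed device node; actual path follows the gpio_chip label */
	fd = open("/dev/rtdm/pinctrl-bcm2835/gpio17", O_RDWR);
	if (fd < 0)
		return 1;

	/* request monotonic timestamping, then hook the pin interrupt */
	ioctl(fd, GPIO_RTIOC_TS_MONO, &enable_ts);
	ioctl(fd, GPIO_RTIOC_IRQEN, &trigger);

	/* blocks in gpio_pin_read_rt() until the next edge is signalled */
	if (read(fd, &rdo, sizeof(rdo)) == sizeof(rdo))
		printf("value=%d at %lld ns\n", rdo.value,
		       (long long)rdo.timestamp);

	close(fd);
	return 0;
}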
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/drivers/xenomai/gpio/Makefile	1970-01-01 01:00:00.000000000 +0100
+/**
+ * @note Copyright (C) 2017 Greg Gallagher <greg@embeddedgreg.com>
+ * 
+ * This driver is inspired by gpio-bcm2835.c; please see the original
+ * file for copyright information.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+#include <linux/module.h>
+#include <rtdm/gpio.h>
+
+#define RTDM_SUBCLASS_ZYNQ7000  4
+
+static int __init zynq7000_gpio_init(void)
+{
+	return rtdm_gpiochip_scan_of(NULL, "xlnx,zynq-gpio-1.0",
+				     RTDM_SUBCLASS_ZYNQ7000);
+}
+module_init(zynq7000_gpio_init);
+
+static void __exit zynq7000_gpio_exit(void)
+{
+	rtdm_gpiochip_remove_by_type(RTDM_SUBCLASS_ZYNQ7000);
+}
+module_exit(zynq7000_gpio_exit);
+
+MODULE_LICENSE("GPL");
+
+++ linux-patched/drivers/xenomai/gpio/Makefile	2022-03-21 12:58:31.285870473 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/drivers/xenomai/gpio/gpio-xilinx.c	1970-01-01 01:00:00.000000000 +0100
+ccflags-$(CONFIG_XENO_DRIVERS_GPIO_DEBUG) := -DDEBUG
+
+obj-$(CONFIG_XENO_DRIVERS_GPIO_BCM2835) += xeno-gpio-bcm2835.o
+obj-$(CONFIG_XENO_DRIVERS_GPIO_MXC) += xeno-gpio-mxc.o
+obj-$(CONFIG_XENO_DRIVERS_GPIO_SUN8I_H3) += xeno-gpio-sun8i-h3.o
+obj-$(CONFIG_XENO_DRIVERS_GPIO_ZYNQ7000) += xeno-gpio-zynq7000.o
+obj-$(CONFIG_XENO_DRIVERS_GPIO_XILINX) += xeno-gpio-xilinx.o
+obj-$(CONFIG_XENO_DRIVERS_GPIO_OMAP) += xeno-gpio-omap.o
+obj-$(CONFIG_XENO_DRIVERS_GPIO_CHERRYVIEW) += xeno-gpio-cherryview.o
+obj-$(CONFIG_XENO_DRIVERS_GPIO) += gpio-core.o
+
+xeno-gpio-bcm2835-y := gpio-bcm2835.o
+xeno-gpio-mxc-y := gpio-mxc.o
+xeno-gpio-sun8i-h3-y := gpio-sun8i-h3.o
+xeno-gpio-zynq7000-y := gpio-zynq7000.o
+xeno-gpio-xilinx-y := gpio-xilinx.o
+xeno-gpio-omap-y := gpio-omap.o
+xeno-gpio-cherryview-y := gpio-cherryview.o
+++ linux-patched/drivers/xenomai/gpio/gpio-xilinx.c	2022-03-21 12:58:31.278870541 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/drivers/xenomai/gpio/gpio-cherryview.c	1970-01-01 01:00:00.000000000 +0100
+/**
+ * @note Copyright (C) 2017 Greg Gallagher <greg@embeddedgreg.com>
+ *
+ * This driver controls the GPIOs located in the PL (programmable
+ * logic) of the Zynq SoC.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+#include <linux/module.h>
+#include <rtdm/gpio.h>
+
+#define RTDM_SUBCLASS_XILINX  5
+
+static int __init xilinx_gpio_init(void)
+{
+	return rtdm_gpiochip_scan_of(NULL, "xlnx,xps-gpio-1.00.a",
+				     RTDM_SUBCLASS_XILINX);
+}
+module_init(xilinx_gpio_init);
+
+static void __exit xilinx_gpio_exit(void)
+{
+	rtdm_gpiochip_remove_by_type(RTDM_SUBCLASS_XILINX);
+}
+module_exit(xilinx_gpio_exit);
+
+MODULE_LICENSE("GPL");
+
+++ linux-patched/drivers/xenomai/gpio/gpio-cherryview.c	2022-03-21 12:58:31.270870619 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/drivers/xenomai/serial/16550A_pci.h	1970-01-01 01:00:00.000000000 +0100
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * @note Copyright (C) 2021 Hongzhan Chen <hongzhan.chen@intel.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+#include <linux/module.h>
+#include <rtdm/gpio.h>
+
+#define RTDM_SUBCLASS_CHERRYVIEW  7
+
+static const char *label_array[] = {
+	"INT33FF:00",
+	"INT33FF:01",
+	"INT33FF:02",
+	"INT33FF:03",
+};
+
+static int __init cherryview_gpio_init(void)
+{
+	return rtdm_gpiochip_array_find(NULL, label_array,
+					ARRAY_SIZE(label_array),
+					RTDM_SUBCLASS_CHERRYVIEW);
+}
+module_init(cherryview_gpio_init);
+
+static void __exit cherryview_gpio_exit(void)
+{
+	rtdm_gpiochip_remove_by_type(RTDM_SUBCLASS_CHERRYVIEW);
+}
+module_exit(cherryview_gpio_exit);
+
+MODULE_LICENSE("GPL");
+++ linux-patched/drivers/xenomai/serial/16550A_pci.h	2022-03-21 12:58:31.263870687 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/drivers/xenomai/serial/16550A.c	1970-01-01 01:00:00.000000000 +0100
+/*
+ * Copyright (C) 2006-2007 Jan Kiszka <jan.kiszka@web.de>.
+ * Copyright (C) 2011 Stefan Kisdaroczi <kisda@hispeed.ch>.
+ *
+ * Xenomai is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * Xenomai is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Xenomai; if not, write to the Free Software Foundation,
+ * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+#if defined(CONFIG_XENO_DRIVERS_16550A_PCI)
+
+#include <linux/pci.h>
+
+struct rt_16550_pci_board {
+	char *name;
+	resource_size_t resource_base_addr;
+	unsigned int nports;
+	unsigned int port_ofs;
+	unsigned long irqtype;
+	unsigned int baud_base;
+	int tx_fifo;
+};
+
+#if defined(CONFIG_XENO_DRIVERS_16550A_PCI_MOXA)
+
+#define PCI_DEVICE_ID_CP112UL	0x1120
+#define PCI_DEVICE_ID_CP114UL	0x1143
+#define PCI_DEVICE_ID_CP138U	0x1380
+
+static const struct rt_16550_pci_board rt_16550_moxa_c104 = {
+	.name = "Moxa C104H/PCI",
+	.resource_base_addr = 2,
+	.nports = 4,
+	.port_ofs = 8,
+	.baud_base = 921600,
+	.tx_fifo = 16,
+	.irqtype = RTDM_IRQTYPE_SHARED,
+};
+
+static const struct rt_16550_pci_board rt_16550_moxa_c168 = {
+	.name = "Moxa C168H/PCI",
+	.resource_base_addr = 2,
+	.nports = 8,
+	.port_ofs = 8,
+	.baud_base = 921600,
+	.tx_fifo = 16,
+	.irqtype = RTDM_IRQTYPE_SHARED,
+};
+
+static const struct rt_16550_pci_board rt_16550_moxa_cp114 = {
+	.name = "Moxa CP-114",
+	.resource_base_addr = 2,
+	.nports = 4,
+	.port_ofs = 8,
+	.baud_base = 921600,
+	.tx_fifo = 16,
+	.irqtype = RTDM_IRQTYPE_SHARED,
+};
+
+static const struct rt_16550_pci_board rt_16550_moxa_cp132 = {
+	.name = "Moxa CP-132",
+	.resource_base_addr = 2,
+	.nports = 2,
+	.port_ofs = 8,
+	.baud_base = 921600,
+	.tx_fifo = 16,
+	.irqtype = RTDM_IRQTYPE_SHARED,
+};
+
+static const struct rt_16550_pci_board rt_16550_moxa_cp102u = {
+	.name = "Moxa CP-102U",
+	.resource_base_addr = 2,
+	.nports = 2,
+	.port_ofs = 8,
+	.baud_base = 921600,
+	.tx_fifo = 16,
+	.irqtype = RTDM_IRQTYPE_SHARED,
+};
+
+static const struct rt_16550_pci_board rt_16550_moxa_cp102ul = {
+	.name = "Moxa CP-102UL",
+	.resource_base_addr = 2,
+	.nports = 2,
+	.port_ofs = 8,
+	.baud_base = 921600,
+	.tx_fifo = 16,
+	.irqtype = RTDM_IRQTYPE_SHARED,
+};
+
+static const struct rt_16550_pci_board rt_16550_moxa_cp104u = {
+	.name = "Moxa CP-104U",
+	.resource_base_addr = 2,
+	.nports = 4,
+	.port_ofs = 8,
+	.baud_base = 921600,
+	.tx_fifo = 16,
+	.irqtype = RTDM_IRQTYPE_SHARED,
+};
+
+static const struct rt_16550_pci_board rt_16550_moxa_cp112ul = {
+	.name = "Moxa CP-112UL",
+	.resource_base_addr = 2,
+	.nports = 2,
+	.port_ofs = 8,
+	.baud_base = 921600,
+	.tx_fifo = 16,
+	.irqtype = RTDM_IRQTYPE_SHARED,
+};
+
+static const struct rt_16550_pci_board rt_16550_moxa_cp114ul = {
+	.name = "Moxa CP-114UL",
+	.resource_base_addr = 2,
+	.nports = 4,
+	.port_ofs = 8,
+	.baud_base = 921600,
+	.tx_fifo = 16,
+	.irqtype = RTDM_IRQTYPE_SHARED,
+};
+
+static const struct rt_16550_pci_board rt_16550_moxa_cp118u = {
+	.name = "Moxa CP-118U",
+	.resource_base_addr = 2,
+	.nports = 8,
+	.port_ofs = 8,
+	.baud_base = 921600,
+	.tx_fifo = 16,
+	.irqtype = RTDM_IRQTYPE_SHARED,
+};
+
+static const struct rt_16550_pci_board rt_16550_moxa_cp132u = {
+	.name = "Moxa CP-132U",
+	.resource_base_addr = 2,
+	.nports = 2,
+	.port_ofs = 8,
+	.baud_base = 921600,
+	.tx_fifo = 16,
+	.irqtype = RTDM_IRQTYPE_SHARED,
+};
+
+static const struct rt_16550_pci_board rt_16550_moxa_cp134u = {
+	.name = "Moxa CP-134U",
+	.resource_base_addr = 2,
+	.nports = 4,
+	.port_ofs = 8,
+	.baud_base = 921600,
+	.tx_fifo = 16,
+	.irqtype = RTDM_IRQTYPE_SHARED,
+};
+
+static const struct rt_16550_pci_board rt_16550_moxa_cp138u = {
+	.name = "Moxa CP-138U",
+	.resource_base_addr = 2,
+	.nports = 8,
+	.port_ofs = 8,
+	.baud_base = 921600,
+	.tx_fifo = 16,
+	.irqtype = RTDM_IRQTYPE_SHARED,
+};
+
+static const struct rt_16550_pci_board rt_16550_moxa_cp168u = {
+	.name = "Moxa CP-168U",
+	.resource_base_addr = 2,
+	.nports = 8,
+	.port_ofs = 8,
+	.baud_base = 921600,
+	.tx_fifo = 16,
+	.irqtype = RTDM_IRQTYPE_SHARED,
+};
+#endif
+
+const struct pci_device_id rt_16550_pci_table[] = {
+#if defined(CONFIG_XENO_DRIVERS_16550A_PCI_MOXA)
+	{PCI_VDEVICE(MOXA, PCI_DEVICE_ID_MOXA_C104),
+	 .driver_data = (unsigned long)&rt_16550_moxa_c104},
+	{PCI_VDEVICE(MOXA, PCI_DEVICE_ID_MOXA_C168),
+	 .driver_data = (unsigned long)&rt_16550_moxa_c168},
+	{PCI_VDEVICE(MOXA, PCI_DEVICE_ID_MOXA_CP114),
+	 .driver_data = (unsigned long)&rt_16550_moxa_cp114},
+	{PCI_VDEVICE(MOXA, PCI_DEVICE_ID_MOXA_CP132),
+	 .driver_data = (unsigned long)&rt_16550_moxa_cp132},
+	{PCI_VDEVICE(MOXA, PCI_DEVICE_ID_MOXA_CP102U),
+	 .driver_data = (unsigned long)&rt_16550_moxa_cp102u},
+	{PCI_VDEVICE(MOXA, PCI_DEVICE_ID_MOXA_CP102UL),
+	 .driver_data = (unsigned long)&rt_16550_moxa_cp102ul},
+	{PCI_VDEVICE(MOXA, PCI_DEVICE_ID_MOXA_CP104U),
+	 .driver_data = (unsigned long)&rt_16550_moxa_cp104u},
+	{PCI_VDEVICE(MOXA, PCI_DEVICE_ID_CP112UL),
+	 .driver_data = (unsigned long)&rt_16550_moxa_cp112ul},
+	{PCI_VDEVICE(MOXA, PCI_DEVICE_ID_CP114UL),
+	 .driver_data = (unsigned long)&rt_16550_moxa_cp114ul},
+	{PCI_VDEVICE(MOXA, PCI_DEVICE_ID_MOXA_CP118U),
+	 .driver_data = (unsigned long)&rt_16550_moxa_cp118u},
+	{PCI_VDEVICE(MOXA, PCI_DEVICE_ID_MOXA_CP132U),
+	 .driver_data = (unsigned long)&rt_16550_moxa_cp132u},
+	{PCI_VDEVICE(MOXA, PCI_DEVICE_ID_MOXA_CP134U),
+	 .driver_data = (unsigned long)&rt_16550_moxa_cp134u},
+	{PCI_VDEVICE(MOXA, PCI_DEVICE_ID_CP138U),
+	 .driver_data = (unsigned long)&rt_16550_moxa_cp138u},
+	{PCI_VDEVICE(MOXA, PCI_DEVICE_ID_MOXA_CP168U),
+	 .driver_data = (unsigned long)&rt_16550_moxa_cp168u},
+#endif
+	{ }
+};
+
+static int rt_16550_pci_probe(struct pci_dev *pdev,
+			      const struct pci_device_id *ent)
+{
+	struct rt_16550_pci_board *board;
+	int err;
+	int i;
+	int port = 0;
+	int base_addr;
+	int max_devices = 0;
+
+	if (!ent->driver_data)
+		return -ENODEV;
+
+	board = (struct rt_16550_pci_board *)ent->driver_data;
+
+	for (i = 0; i < MAX_DEVICES; i++)
+		if (!rt_16550_addr_param(i))
+			max_devices++;
+
+	if (board->nports > max_devices)
+		return -ENODEV;
+
+	err = pci_enable_device(pdev);
+	if (err)
+		return err;
+
+	base_addr = pci_resource_start(pdev, board->resource_base_addr);
+
+	for (i = 0; i < MAX_DEVICES; i++) {
+		if ((port < board->nports) && (!rt_16550_addr_param(i))) {
+			io[i] = base_addr + port * board->port_ofs;
+			irq[i] = pdev->irq;
+			irqtype[i] = board->irqtype;
+			baud_base[i] = board->baud_base;
+			tx_fifo[i] = board->tx_fifo;
+			port++;
+		}
+	}
+
+	return 0;
+}
+
+static void rt_16550_pci_remove(struct pci_dev *pdev)
+{
+	pci_disable_device(pdev);
+}
+
+static struct pci_driver rt_16550_pci_driver = {
+	.name     = RT_16550_DRIVER_NAME,
+	.id_table = rt_16550_pci_table,
+	.probe    = rt_16550_pci_probe,
+	.remove   = rt_16550_pci_remove
+};
+
+static int pci_registered;
+
+static inline void rt_16550_pci_init(void)
+{
+	if (pci_register_driver(&rt_16550_pci_driver) == 0)
+		pci_registered = 1;
+}
+
+static inline void rt_16550_pci_cleanup(void)
+{
+	if (pci_registered)
+		pci_unregister_driver(&rt_16550_pci_driver);
+}
+
+#else /* !CONFIG_XENO_DRIVERS_16550A_PCI */
+
+#define rt_16550_pci_init()	do { } while (0)
+#define rt_16550_pci_cleanup()	do { } while (0)
+
+#endif /* !CONFIG_XENO_DRIVERS_16550A_PCI */
+++ linux-patched/drivers/xenomai/serial/16550A.c	2022-03-21 12:58:31.256870756 +0100
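Editor's note: the PCI glue above drives everything from rt_16550_pci_board descriptors referenced by rt_16550_pci_table; the probe routine only copies the BAR base, IRQ and FIFO parameters into the module-wide io[]/irq[]/baud_base[]/tx_fifo[] slots that are still free. Supporting another 16550-compatible PCI card therefore amounts to one descriptor plus one table entry, roughly as in the following sketch (the vendor/device IDs, BAR index and port stride are hypothetical placeholders, not values taken from this patch):

#define PCI_VENDOR_ID_ACME	0x1234	/* hypothetical vendor/device IDs */
#define PCI_DEVICE_ID_ACME_4S	0x5678

static const struct rt_16550_pci_board rt_16550_acme_4s = {
	.name = "ACME quad UART",
	.resource_base_addr = 2,	/* BAR holding the UART registers */
	.nports = 4,
	.port_ofs = 8,			/* register stride between ports */
	.baud_base = 115200,
	.tx_fifo = 16,
	.irqtype = RTDM_IRQTYPE_SHARED,
};

	/* additional rt_16550_pci_table[] entry: */
	{PCI_VDEVICE(ACME, PCI_DEVICE_ID_ACME_4S),
	 .driver_data = (unsigned long)&rt_16550_acme_4s},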
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/drivers/xenomai/serial/rt_imx_uart.c	1970-01-01 01:00:00.000000000 +0100
+/*
+ * Copyright (C) 2005-2007 Jan Kiszka <jan.kiszka@web.de>.
+ *
+ * Xenomai is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * Xenomai is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Xenomai; if not, write to the Free Software Foundation,
+ * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+#include <linux/version.h>
+#include <linux/module.h>
+#include <linux/ioport.h>
+#include <linux/slab.h>
+#include <asm/io.h>
+
+#include <rtdm/serial.h>
+#include <rtdm/driver.h>
+
+MODULE_DESCRIPTION("RTDM-based driver for 16550A UARTs");
+MODULE_AUTHOR("Jan Kiszka <jan.kiszka@web.de>");
+MODULE_VERSION("1.5.2");
+MODULE_LICENSE("GPL");
+
+#define RT_16550_DRIVER_NAME	"xeno_16550A"
+
+#define MAX_DEVICES		8
+
+#define IN_BUFFER_SIZE		4096
+#define OUT_BUFFER_SIZE		4096
+
+#define DEFAULT_BAUD_BASE	115200
+#define DEFAULT_TX_FIFO		16
+
+#define PARITY_MASK		0x03
+#define DATA_BITS_MASK		0x03
+#define STOP_BITS_MASK		0x01
+#define FIFO_MASK		0xC0
+#define EVENT_MASK		0x0F
+
+#define LCR_DLAB		0x80
+
+#define FCR_FIFO		0x01
+#define FCR_RESET_RX		0x02
+#define FCR_RESET_TX		0x04
+
+#define IER_RX			0x01
+#define IER_TX			0x02
+#define IER_STAT		0x04
+#define IER_MODEM		0x08
+
+#define IIR_MODEM		0x00
+#define IIR_PIRQ		0x01
+#define IIR_TX			0x02
+#define IIR_RX			0x04
+#define IIR_STAT		0x06
+#define IIR_MASK		0x07
+
+#define RHR			0	/* Receive Holding Buffer */
+#define THR			0	/* Transmit Holding Buffer */
+#define DLL			0	/* Divisor Latch LSB */
+#define IER			1	/* Interrupt Enable Register */
+#define DLM			1	/* Divisor Latch MSB */
+#define IIR			2	/* Interrupt Id Register */
+#define FCR			2	/* Fifo Control Register */
+#define LCR			3	/* Line Control Register */
+#define MCR			4	/* Modem Control Register */
+#define LSR			5	/* Line Status Register */
+#define MSR			6	/* Modem Status Register */
+
+struct rt_16550_context {
+	struct rtser_config config;	/* current device configuration */
+
+	rtdm_irq_t irq_handle;		/* device IRQ handle */
+	rtdm_lock_t lock;		/* lock to protect context struct */
+
+	unsigned long base_addr;	/* hardware IO base address */
+#ifdef CONFIG_XENO_DRIVERS_16550A_ANY
+	int io_mode;			/* hardware IO-access mode */
+#endif
+	int tx_fifo;			/* cached global tx_fifo[<device>] */
+
+	int in_head;			/* RX ring buffer, head pointer */
+	int in_tail;			/* RX ring buffer, tail pointer */
+	size_t in_npend;		/* pending bytes in RX ring */
+	int in_nwait;			/* bytes the user waits for */
+	rtdm_event_t in_event;		/* raised to unblock reader */
+	char in_buf[IN_BUFFER_SIZE];	/* RX ring buffer */
+	volatile unsigned long in_lock;	/* single-reader lock */
+	uint64_t *in_history;		/* RX timestamp buffer */
+
+	int out_head;			/* TX ring buffer, head pointer */
+	int out_tail;			/* TX ring buffer, tail pointer */
+	size_t out_npend;		/* pending bytes in TX ring */
+	rtdm_event_t out_event;		/* raised to unblock writer */
+	char out_buf[OUT_BUFFER_SIZE];	/* TX ring buffer */
+	rtdm_mutex_t out_lock;		/* single-writer mutex */
+
+	uint64_t last_timestamp;	/* timestamp of last event */
+	int ioc_events;			/* recorded events */
+	rtdm_event_t ioc_event;		/* raised to unblock event waiter */
+	volatile unsigned long ioc_event_lock;	/* single-waiter lock */
+
+	int ier_status;			/* IER cache */
+	int mcr_status;			/* MCR cache */
+	int status;			/* cache for LSR + soft-states */
+	int saved_errors;		/* error cache for RTIOC_GET_STATUS */
+};
+
+static const struct rtser_config default_config = {
+	0xFFFF, RTSER_DEF_BAUD, RTSER_DEF_PARITY, RTSER_DEF_BITS,
+	RTSER_DEF_STOPB, RTSER_DEF_HAND, RTSER_DEF_FIFO_DEPTH, 0,
+	RTSER_DEF_TIMEOUT, RTSER_DEF_TIMEOUT, RTSER_DEF_TIMEOUT,
+	RTSER_DEF_TIMESTAMP_HISTORY, RTSER_DEF_EVENT_MASK, RTSER_DEF_RS485
+};
+
+static struct rtdm_device *device[MAX_DEVICES];
+
+static unsigned int irq[MAX_DEVICES];
+static unsigned long irqtype[MAX_DEVICES] = {
+	[0 ... MAX_DEVICES-1] = RTDM_IRQTYPE_SHARED | RTDM_IRQTYPE_EDGE
+};
+static unsigned int baud_base[MAX_DEVICES];
+static int tx_fifo[MAX_DEVICES];
+
+module_param_array(irq, uint, NULL, 0400);
+module_param_array(baud_base, uint, NULL, 0400);
+module_param_array(tx_fifo, int, NULL, 0400);
+
+MODULE_PARM_DESC(irq, "IRQ numbers of the serial devices");
+MODULE_PARM_DESC(baud_base, "Maximum baud rate of the serial device "
+		 "(internal clock rate / 16)");
+MODULE_PARM_DESC(tx_fifo, "Transmitter FIFO size");
+
+#include "16550A_io.h"
+#include "16550A_pnp.h"
+#include "16550A_pci.h"
+
+static inline int rt_16550_rx_interrupt(struct rt_16550_context *ctx,
+					uint64_t * timestamp)
+{
+	unsigned long base = ctx->base_addr;
+	int mode = rt_16550_io_mode_from_ctx(ctx);
+	int rbytes = 0;
+	int lsr = 0;
+	int c;
+
+	do {
+		c = rt_16550_reg_in(mode, base, RHR);	/* read input char */
+
+		ctx->in_buf[ctx->in_tail] = c;
+		if (ctx->in_history)
+			ctx->in_history[ctx->in_tail] = *timestamp;
+		ctx->in_tail = (ctx->in_tail + 1) & (IN_BUFFER_SIZE - 1);
+
+		if (++ctx->in_npend > IN_BUFFER_SIZE) {
+			lsr |= RTSER_SOFT_OVERRUN_ERR;
+			ctx->in_npend--;
+		}
+
+		rbytes++;
+		lsr &= ~RTSER_LSR_DATA;
+		lsr |= (rt_16550_reg_in(mode, base, LSR) &
+			(RTSER_LSR_DATA | RTSER_LSR_OVERRUN_ERR |
+			 RTSER_LSR_PARITY_ERR | RTSER_LSR_FRAMING_ERR |
+			 RTSER_LSR_BREAK_IND));
+	} while (lsr & RTSER_LSR_DATA);
+
+	/* save new errors */
+	ctx->status |= lsr;
+
+	/* If we are enforcing RTS/CTS flow control and the input
+	   buffer has filled beyond the specified high watermark,
+	   clear RTS. */
+/*	if (uart->i_count >= uart->config.rts_hiwm &&
+	    (uart->config.handshake & RT_UART_RTSCTS) != 0 &&
+	    (uart->modem & MCR_RTS) != 0) {
+		uart->modem &= ~MCR_RTS;
+		rt_16550_reg_out(mode, base, MCR, uart->modem);
+	}*/
+
+	return rbytes;
+}
+
+static void rt_16550_tx_fill(struct rt_16550_context *ctx)
+{
+	int c;
+	int count;
+	unsigned long base = ctx->base_addr;
+	int mode = rt_16550_io_mode_from_ctx(ctx);
+
+/*	if (uart->modem & MSR_CTS)*/
+	{
+		for (count = ctx->tx_fifo;
+		     (count > 0) && (ctx->out_npend > 0);
+		     count--, ctx->out_npend--) {
+			c = ctx->out_buf[ctx->out_head++];
+			rt_16550_reg_out(mode, base, THR, c);
+			ctx->out_head &= (OUT_BUFFER_SIZE - 1);
+		}
+	}
+}
+
+static inline void rt_16550_stat_interrupt(struct rt_16550_context *ctx)
+{
+	unsigned long base = ctx->base_addr;
+	int mode = rt_16550_io_mode_from_ctx(ctx);
+
+	ctx->status |= (rt_16550_reg_in(mode, base, LSR) &
+			(RTSER_LSR_OVERRUN_ERR | RTSER_LSR_PARITY_ERR |
+			 RTSER_LSR_FRAMING_ERR | RTSER_LSR_BREAK_IND));
+}
+
+static int rt_16550_interrupt(rtdm_irq_t * irq_context)
+{
+	struct rt_16550_context *ctx;
+	unsigned long base;
+	int mode;
+	int iir;
+	uint64_t timestamp = rtdm_clock_read();
+	int rbytes = 0;
+	int events = 0;
+	int modem;
+	int ret = RTDM_IRQ_NONE;
+
+	ctx = rtdm_irq_get_arg(irq_context, struct rt_16550_context);
+	base = ctx->base_addr;
+	mode = rt_16550_io_mode_from_ctx(ctx);
+
+	rtdm_lock_get(&ctx->lock);
+
+	while (1) {
+		iir = rt_16550_reg_in(mode, base, IIR) & IIR_MASK;
+		if (iir & IIR_PIRQ)
+			break;
+
+		if (iir == IIR_RX) {
+			rbytes += rt_16550_rx_interrupt(ctx, &timestamp);
+			events |= RTSER_EVENT_RXPEND;
+		} else if (iir == IIR_STAT)
+			rt_16550_stat_interrupt(ctx);
+		else if (iir == IIR_TX)
+			rt_16550_tx_fill(ctx);
+		else if (iir == IIR_MODEM) {
+			modem = rt_16550_reg_in(mode, base, MSR);
+			if (modem & (modem << 4))
+				events |= RTSER_EVENT_MODEMHI;
+			if ((modem ^ 0xF0) & (modem << 4))
+				events |= RTSER_EVENT_MODEMLO;
+		}
+
+		ret = RTDM_IRQ_HANDLED;
+	}
+
+	if (ctx->in_nwait > 0) {
+		if ((ctx->in_nwait <= rbytes) || ctx->status) {
+			ctx->in_nwait = 0;
+			rtdm_event_signal(&ctx->in_event);
+		} else
+			ctx->in_nwait -= rbytes;
+	}
+
+	if (ctx->status) {
+		events |= RTSER_EVENT_ERRPEND;
+		ctx->ier_status &= ~IER_STAT;
+	}
+
+	if (events & ctx->config.event_mask) {
+		int old_events = ctx->ioc_events;
+
+		ctx->last_timestamp = timestamp;
+		ctx->ioc_events = events;
+
+		if (!old_events)
+			rtdm_event_signal(&ctx->ioc_event);
+	}
+
+	if ((ctx->ier_status & IER_TX) && (ctx->out_npend == 0)) {
+		/* mask transmitter empty interrupt */
+		ctx->ier_status &= ~IER_TX;
+
+		rtdm_event_signal(&ctx->out_event);
+	}
+
+	/* update interrupt mask */
+	rt_16550_reg_out(mode, base, IER, ctx->ier_status);
+
+	rtdm_lock_put(&ctx->lock);
+
+	return ret;
+}
+
+static int rt_16550_set_config(struct rt_16550_context *ctx,
+			       const struct rtser_config *config,
+			       uint64_t **in_history_ptr)
+{
+	rtdm_lockctx_t lock_ctx;
+	unsigned long base = ctx->base_addr;
+	int mode = rt_16550_io_mode_from_ctx(ctx);
+	int err = 0;
+
+	/* make line configuration atomic and IRQ-safe */
+	rtdm_lock_get_irqsave(&ctx->lock, lock_ctx);
+
+	if (config->config_mask & RTSER_SET_BAUD) {
+		int dev_id = rtdm_fd_minor(rtdm_private_to_fd(ctx));
+		int baud_div;
+
+		ctx->config.baud_rate = config->baud_rate;
+		baud_div = (baud_base[dev_id] + (ctx->config.baud_rate>>1)) /
+			ctx->config.baud_rate;
+		rt_16550_reg_out(mode, base, LCR, LCR_DLAB);
+		rt_16550_reg_out(mode, base, DLL, baud_div & 0xff);
+		rt_16550_reg_out(mode, base, DLM, baud_div >> 8);
+	}
+
+	if (config->config_mask & RTSER_SET_PARITY)
+		ctx->config.parity = config->parity & PARITY_MASK;
+	if (config->config_mask & RTSER_SET_DATA_BITS)
+		ctx->config.data_bits = config->data_bits & DATA_BITS_MASK;
+	if (config->config_mask & RTSER_SET_STOP_BITS)
+		ctx->config.stop_bits = config->stop_bits & STOP_BITS_MASK;
+
+	if (config->config_mask & (RTSER_SET_PARITY |
+				   RTSER_SET_DATA_BITS |
+				   RTSER_SET_STOP_BITS |
+				   RTSER_SET_BAUD)) {
+		rt_16550_reg_out(mode, base, LCR,
+				 (ctx->config.parity << 3) |
+				 (ctx->config.stop_bits << 2) |
+				 ctx->config.data_bits);
+		ctx->status = 0;
+		ctx->ioc_events &= ~RTSER_EVENT_ERRPEND;
+	}
+
+	if (config->config_mask & RTSER_SET_FIFO_DEPTH) {
+		ctx->config.fifo_depth = config->fifo_depth & FIFO_MASK;
+		rt_16550_reg_out(mode, base, FCR,
+				 FCR_FIFO | FCR_RESET_RX | FCR_RESET_TX);
+		rt_16550_reg_out(mode, base, FCR,
+				 FCR_FIFO | ctx->config.fifo_depth);
+	}
+
+	rtdm_lock_put_irqrestore(&ctx->lock, lock_ctx);
+
+	/* Timeout manipulation is not atomic. The user is supposed to take
+	   care not to use and change timeouts at the same time. */
+	if (config->config_mask & RTSER_SET_TIMEOUT_RX)
+		ctx->config.rx_timeout = config->rx_timeout;
+	if (config->config_mask & RTSER_SET_TIMEOUT_TX)
+		ctx->config.tx_timeout = config->tx_timeout;
+	if (config->config_mask & RTSER_SET_TIMEOUT_EVENT)
+		ctx->config.event_timeout = config->event_timeout;
+
+	if (config->config_mask & RTSER_SET_TIMESTAMP_HISTORY) {
+		/* change timestamp history atomically */
+		rtdm_lock_get_irqsave(&ctx->lock, lock_ctx);
+
+		if (config->timestamp_history & RTSER_RX_TIMESTAMP_HISTORY) {
+			if (!ctx->in_history) {
+				ctx->in_history = *in_history_ptr;
+				*in_history_ptr = NULL;
+				if (!ctx->in_history)
+					err = -ENOMEM;
+			}
+		} else {
+			*in_history_ptr = ctx->in_history;
+			ctx->in_history = NULL;
+		}
+
+		rtdm_lock_put_irqrestore(&ctx->lock, lock_ctx);
+	}
+
+	if (config->config_mask & RTSER_SET_EVENT_MASK) {
+		/* change event mask atomically */
+		rtdm_lock_get_irqsave(&ctx->lock, lock_ctx);
+
+		ctx->config.event_mask = config->event_mask & EVENT_MASK;
+		ctx->ioc_events = 0;
+
+		if ((config->event_mask & RTSER_EVENT_RXPEND) &&
+		    (ctx->in_npend > 0))
+			ctx->ioc_events |= RTSER_EVENT_RXPEND;
+
+		if ((config->event_mask & RTSER_EVENT_ERRPEND)
+		    && ctx->status)
+			ctx->ioc_events |= RTSER_EVENT_ERRPEND;
+
+		if (config->event_mask & (RTSER_EVENT_MODEMHI | RTSER_EVENT_MODEMLO))
+			/* enable modem status interrupt */
+			ctx->ier_status |= IER_MODEM;
+		else
+			/* disable modem status interrupt */
+			ctx->ier_status &= ~IER_MODEM;
+		rt_16550_reg_out(mode, base, IER, ctx->ier_status);
+
+		rtdm_lock_put_irqrestore(&ctx->lock, lock_ctx);
+	}
+
+	if (config->config_mask & RTSER_SET_HANDSHAKE) {
+		/* change handshake atomically */
+		rtdm_lock_get_irqsave(&ctx->lock, lock_ctx);
+
+		ctx->config.handshake = config->handshake;
+
+		switch (ctx->config.handshake) {
+		case RTSER_RTSCTS_HAND:
+			/* Hardware handshake is not implemented here;
+			   fall through to the RTSER_NO_HAND setup. */
+
+		default:	/* RTSER_NO_HAND */
+			ctx->mcr_status =
+			    RTSER_MCR_DTR | RTSER_MCR_RTS | RTSER_MCR_OUT2;
+			break;
+		}
+		rt_16550_reg_out(mode, base, MCR, ctx->mcr_status);
+
+		rtdm_lock_put_irqrestore(&ctx->lock, lock_ctx);
+	}
+
+	return err;
+}
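Editor's note: one detail of rt_16550_set_config() worth spelling out is the divisor computation for RTSER_SET_BAUD. Adding half of the requested rate before the integer division rounds the divisor to the nearest value instead of always truncating. With a baud_base of 115200 (the driver's DEFAULT_BAUD_BASE), for example:

	/* baud_div = (baud_base + baud_rate / 2) / baud_rate */
	/* 38400 baud: (115200 + 19200) / 38400 = 3  -> DLL = 3,  DLM = 0 */
	/*  9600 baud: (115200 +  4800) /  9600 = 12 -> DLL = 12, DLM = 0 */

The low byte of the result is written to DLL and the high byte to DLM while LCR_DLAB is set, which is the standard 16550 divisor-latch programming sequence.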
+
+void rt_16550_cleanup_ctx(struct rt_16550_context *ctx)
+{
+	rtdm_event_destroy(&ctx->in_event);
+	rtdm_event_destroy(&ctx->out_event);
+	rtdm_event_destroy(&ctx->ioc_event);
+	rtdm_mutex_destroy(&ctx->out_lock);
+}
+
+int rt_16550_open(struct rtdm_fd *fd, int oflags)
+{
+	struct rt_16550_context *ctx;
+	int dev_id = rtdm_fd_minor(fd);
+	int err;
+	uint64_t *dummy;
+	rtdm_lockctx_t lock_ctx;
+
+	ctx = rtdm_fd_to_private(fd);
+
+	/* IPC initialisation - cannot fail with the parameters used here */
+	rtdm_lock_init(&ctx->lock);
+	rtdm_event_init(&ctx->in_event, 0);
+	rtdm_event_init(&ctx->out_event, 0);
+	rtdm_event_init(&ctx->ioc_event, 0);
+	rtdm_mutex_init(&ctx->out_lock);
+
+	rt_16550_init_io_ctx(dev_id, ctx);
+
+	ctx->tx_fifo = tx_fifo[dev_id];
+
+	ctx->in_head = 0;
+	ctx->in_tail = 0;
+	ctx->in_npend = 0;
+	ctx->in_nwait = 0;
+	ctx->in_lock = 0;
+	ctx->in_history = NULL;
+
+	ctx->out_head = 0;
+	ctx->out_tail = 0;
+	ctx->out_npend = 0;
+
+	ctx->ioc_events = 0;
+	ctx->ioc_event_lock = 0;
+	ctx->status = 0;
+	ctx->saved_errors = 0;
+
+	rt_16550_set_config(ctx, &default_config, &dummy);
+
+	err = rtdm_irq_request(&ctx->irq_handle, irq[dev_id],
+			rt_16550_interrupt, irqtype[dev_id],
+			rtdm_fd_device(fd)->name, ctx);
+	if (err) {
+		/* reset DTR and RTS */
+		rt_16550_reg_out(rt_16550_io_mode_from_ctx(ctx), ctx->base_addr,
+				 MCR, 0);
+
+		rt_16550_cleanup_ctx(ctx);
+
+		return err;
+	}
+
+	rtdm_lock_get_irqsave(&ctx->lock, lock_ctx);
+
+	/* enable interrupts */
+	ctx->ier_status = IER_RX;
+	rt_16550_reg_out(rt_16550_io_mode_from_ctx(ctx), ctx->base_addr, IER,
+			 IER_RX);
+
+	rtdm_lock_put_irqrestore(&ctx->lock, lock_ctx);
+
+	return 0;
+}
+
+void rt_16550_close(struct rtdm_fd *fd)
+{
+	struct rt_16550_context *ctx;
+	unsigned long base;
+	int mode;
+	uint64_t *in_history;
+	rtdm_lockctx_t lock_ctx;
+
+	ctx = rtdm_fd_to_private(fd);
+	base = ctx->base_addr;
+	mode = rt_16550_io_mode_from_ctx(ctx);
+
+	rtdm_lock_get_irqsave(&ctx->lock, lock_ctx);
+
+	/* reset DTR and RTS */
+	rt_16550_reg_out(mode, base, MCR, 0);
+
+	/* mask all UART interrupts and clear pending ones. */
+	rt_16550_reg_out(mode, base, IER, 0);
+	rt_16550_reg_in(mode, base, IIR);
+	rt_16550_reg_in(mode, base, LSR);
+	rt_16550_reg_in(mode, base, RHR);
+	rt_16550_reg_in(mode, base, MSR);
+
+	in_history = ctx->in_history;
+	ctx->in_history = NULL;
+
+	rtdm_lock_put_irqrestore(&ctx->lock, lock_ctx);
+
+	rtdm_irq_free(&ctx->irq_handle);
+
+	rt_16550_cleanup_ctx(ctx);
+
+	kfree(in_history);
+}
+
+int rt_16550_ioctl(struct rtdm_fd *fd, unsigned int request, void *arg)
+{
+	rtdm_lockctx_t lock_ctx;
+	struct rt_16550_context *ctx;
+	int err = 0;
+	unsigned long base;
+	int mode;
+
+	ctx = rtdm_fd_to_private(fd);
+	base = ctx->base_addr;
+	mode = rt_16550_io_mode_from_ctx(ctx);
+
+	switch (request) {
+	case RTSER_RTIOC_GET_CONFIG:
+		if (rtdm_fd_is_user(fd))
+			err =
+			    rtdm_safe_copy_to_user(fd, arg,
+						   &ctx->config,
+						   sizeof(struct
+							  rtser_config));
+		else
+			memcpy(arg, &ctx->config,
+			       sizeof(struct rtser_config));
+		break;
+
+	case RTSER_RTIOC_SET_CONFIG: {
+		struct rtser_config *config;
+		struct rtser_config config_buf;
+		uint64_t *hist_buf = NULL;
+
+		config = (struct rtser_config *)arg;
+
+		if (rtdm_fd_is_user(fd)) {
+			err =
+			    rtdm_safe_copy_from_user(fd, &config_buf,
+						     arg,
+						     sizeof(struct
+							    rtser_config));
+			if (err)
+				return err;
+
+			config = &config_buf;
+		}
+
+		if ((config->config_mask & RTSER_SET_BAUD) &&
+		    (config->baud_rate >
+			    baud_base[rtdm_fd_minor(fd)] ||
+			    config->baud_rate <= 0))
+			/* invalid baudrate for this port */
+			return -EINVAL;
+
+		if (config->config_mask & RTSER_SET_TIMESTAMP_HISTORY) {
+			/*
+			 * Reflect the call to non-RT as we will likely
+			 * allocate or free the buffer.
+			 */
+			if (rtdm_in_rt_context())
+				return -ENOSYS;
+
+			if (config->timestamp_history &
+			    RTSER_RX_TIMESTAMP_HISTORY)
+				hist_buf = kmalloc(IN_BUFFER_SIZE *
+						   sizeof(nanosecs_abs_t),
+						   GFP_KERNEL);
+		}
+
+		rt_16550_set_config(ctx, config, &hist_buf);
+
+		if (hist_buf)
+			kfree(hist_buf);
+
+		break;
+	}
+
+	case RTSER_RTIOC_GET_STATUS: {
+		int status;
+
+		rtdm_lock_get_irqsave(&ctx->lock, lock_ctx);
+
+		status = ctx->saved_errors | ctx->status;
+		ctx->status = 0;
+		ctx->saved_errors = 0;
+		ctx->ioc_events &= ~RTSER_EVENT_ERRPEND;
+
+		rtdm_lock_put_irqrestore(&ctx->lock, lock_ctx);
+
+		if (rtdm_fd_is_user(fd)) {
+			struct rtser_status status_buf;
+
+			status_buf.line_status =
+			    rt_16550_reg_in(mode, base, LSR) | status;
+			status_buf.modem_status =
+			    rt_16550_reg_in(mode, base, MSR);
+
+			err =
+			    rtdm_safe_copy_to_user(fd, arg,
+						   &status_buf,
+						   sizeof(struct
+							  rtser_status));
+		} else {
+			((struct rtser_status *)arg)->line_status =
+			    rt_16550_reg_in(mode, base, LSR) | status;
+			((struct rtser_status *)arg)->modem_status =
+			    rt_16550_reg_in(mode, base, MSR);
+		}
+		break;
+	}
+
+	case RTSER_RTIOC_GET_CONTROL:
+		if (rtdm_fd_is_user(fd))
+			err =
+			    rtdm_safe_copy_to_user(fd, arg,
+						   &ctx->mcr_status,
+						   sizeof(int));
+		else
+			*(int *)arg = ctx->mcr_status;
+
+		break;
+
+	case RTSER_RTIOC_SET_CONTROL: {
+		int new_mcr = (long)arg;
+
+		rtdm_lock_get_irqsave(&ctx->lock, lock_ctx);
+		ctx->mcr_status = new_mcr;
+		rt_16550_reg_out(mode, base, MCR, new_mcr);
+		rtdm_lock_put_irqrestore(&ctx->lock, lock_ctx);
+		break;
+	}
+
+	case RTSER_RTIOC_WAIT_EVENT: {
+		struct rtser_event ev = { .rxpend_timestamp = 0 };
+		rtdm_toseq_t timeout_seq;
+
+		if (!rtdm_in_rt_context())
+			return -ENOSYS;
+
+		/* Only one waiter allowed, stop any further attempts here. */
+		if (test_and_set_bit(0, &ctx->ioc_event_lock))
+			return -EBUSY;
+
+		rtdm_toseq_init(&timeout_seq, ctx->config.event_timeout);
+
+		rtdm_lock_get_irqsave(&ctx->lock, lock_ctx);
+
+		while (!ctx->ioc_events) {
+			/* Only enable error interrupt
+			   when the user waits for it. */
+			if (ctx->config.event_mask & RTSER_EVENT_ERRPEND) {
+				ctx->ier_status |= IER_STAT;
+				rt_16550_reg_out(mode, base, IER,
+						 ctx->ier_status);
+			}
+
+			rtdm_lock_put_irqrestore(&ctx->lock, lock_ctx);
+
+			err = rtdm_event_timedwait(&ctx->ioc_event,
+						   ctx->config.event_timeout,
+						   &timeout_seq);
+			if (err) {
+				/* Device has been closed? */
+				if (err == -EIDRM)
+					err = -EBADF;
+				goto wait_unlock_out;
+			}
+
+			rtdm_lock_get_irqsave(&ctx->lock, lock_ctx);
+		}
+
+		ev.events = ctx->ioc_events;
+		ctx->ioc_events &=
+		    ~(RTSER_EVENT_MODEMHI | RTSER_EVENT_MODEMLO);
+
+		ev.last_timestamp = ctx->last_timestamp;
+		ev.rx_pending = ctx->in_npend;
+
+		if (ctx->in_history)
+			ev.rxpend_timestamp = ctx->in_history[ctx->in_head];
+
+		rtdm_lock_put_irqrestore(&ctx->lock, lock_ctx);
+
+		if (rtdm_fd_is_user(fd))
+			err =
+			    rtdm_safe_copy_to_user(fd, arg, &ev,
+						   sizeof(struct
+							  rtser_event));
+		else
+			memcpy(arg, &ev, sizeof(struct rtser_event));
+
+	      wait_unlock_out:
+		/* release the simple event waiter lock */
+		clear_bit(0, &ctx->ioc_event_lock);
+		break;
+	}
+
+	case RTSER_RTIOC_BREAK_CTL: {
+		int lcr = ((long)arg & RTSER_BREAK_SET) << 6;
+
+		rtdm_lock_get_irqsave(&ctx->lock, lock_ctx);
+
+		lcr |=
+		    (ctx->config.parity << 3) | (ctx->config.stop_bits << 2) |
+		    ctx->config.data_bits;
+
+		rt_16550_reg_out(mode, base, LCR, lcr);
+
+		rtdm_lock_put_irqrestore(&ctx->lock, lock_ctx);
+		break;
+	}
+
+	case RTIOC_PURGE: {
+		int fcr = 0;
+
+		rtdm_lock_get_irqsave(&ctx->lock, lock_ctx);
+		if ((long)arg & RTDM_PURGE_RX_BUFFER) {
+			ctx->in_head = 0;
+			ctx->in_tail = 0;
+			ctx->in_npend = 0;
+			ctx->status = 0;
+			fcr |= FCR_FIFO | FCR_RESET_RX;
+			rt_16550_reg_in(mode, base, RHR);
+		}
+		if ((long)arg & RTDM_PURGE_TX_BUFFER) {
+			ctx->out_head = 0;
+			ctx->out_tail = 0;
+			ctx->out_npend = 0;
+			fcr |= FCR_FIFO | FCR_RESET_TX;
+		}
+		if (fcr) {
+			rt_16550_reg_out(mode, base, FCR, fcr);
+			rt_16550_reg_out(mode, base, FCR,
+					 FCR_FIFO | ctx->config.fifo_depth);
+		}
+		rtdm_lock_put_irqrestore(&ctx->lock, lock_ctx);
+		break;
+	}
+
+	default:
+		err = -ENOTTY;
+	}
+
+	return err;
+}
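Editor's note: the ioctl handler above implements the RTSER_RTIOC_* interface of the RTDM serial profile. As a usage illustration, here is a minimal user-space sketch; the device name rtser0 and the header path are assumptions based on the usual Xenomai conventions, and the program must be linked against libcobalt so that open/ioctl/read reach this driver.

/* Sketch: set the baud rate and a 1 ms receive timeout, then read one byte. */
#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <rtdm/serial.h>	/* assumed userland header: struct rtser_config, RTSER_* */

int main(void)
{
	struct rtser_config cfg = {
		.config_mask = RTSER_SET_BAUD | RTSER_SET_TIMEOUT_RX,
		.baud_rate = 115200,
		.rx_timeout = 1000000,		/* 1 ms, in nanoseconds */
	};
	char c;
	int fd;

	fd = open("/dev/rtdm/rtser0", O_RDWR);	/* assumed device node */
	if (fd < 0)
		return 1;

	/* handled by RTSER_RTIOC_SET_CONFIG in rt_16550_ioctl() above */
	if (ioctl(fd, RTSER_RTIOC_SET_CONFIG, &cfg) == 0 &&
	    read(fd, &c, 1) == 1)		/* waits at most rx_timeout */
		printf("got 0x%02x\n", (unsigned char)c);

	close(fd);
	return 0;
}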
+
+ssize_t rt_16550_read(struct rtdm_fd *fd, void *buf, size_t nbyte)
+{
+	struct rt_16550_context *ctx;
+	rtdm_lockctx_t lock_ctx;
+	size_t read = 0;
+	int pending;
+	int block;
+	int subblock;
+	int in_pos;
+	char *out_pos = (char *)buf;
+	rtdm_toseq_t timeout_seq;
+	ssize_t ret = -EAGAIN;	/* for non-blocking read */
+	int nonblocking;
+
+	if (nbyte == 0)
+		return 0;
+
+	if (rtdm_fd_is_user(fd) && !rtdm_rw_user_ok(fd, buf, nbyte))
+		return -EFAULT;
+
+	ctx = rtdm_fd_to_private(fd);
+
+	rtdm_toseq_init(&timeout_seq, ctx->config.rx_timeout);
+
+	/* non-blocking is handled separately here */
+	nonblocking = (ctx->config.rx_timeout < 0);
+
+	/* only one reader allowed, stop any further attempts here */
+	if (test_and_set_bit(0, &ctx->in_lock))
+		return -EBUSY;
+
+	rtdm_lock_get_irqsave(&ctx->lock, lock_ctx);
+
+	while (1) {
+		/* switch on error interrupt - the user is ready to listen */
+		if ((ctx->ier_status & IER_STAT) == 0) {
+			ctx->ier_status |= IER_STAT;
+			rt_16550_reg_out(rt_16550_io_mode_from_ctx(ctx),
+					 ctx->base_addr, IER,
+					 ctx->ier_status);
+		}
+
+		if (ctx->status) {
+			if (ctx->status & RTSER_LSR_BREAK_IND)
+				ret = -EPIPE;
+			else
+				ret = -EIO;
+			ctx->saved_errors = ctx->status &
+			    (RTSER_LSR_OVERRUN_ERR | RTSER_LSR_PARITY_ERR |
+			     RTSER_LSR_FRAMING_ERR | RTSER_SOFT_OVERRUN_ERR);
+			ctx->status = 0;
+			break;
+		}
+
+		pending = ctx->in_npend;
+
+		if (pending > 0) {
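+			/* block: total bytes consumed in this turn; subblock:
+			   contiguous chunk per copy, shrunk at a wrap-around. */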
+			block = subblock = (pending <= nbyte) ? pending : nbyte;
+			in_pos = ctx->in_head;
+
+			rtdm_lock_put_irqrestore(&ctx->lock, lock_ctx);
+
+			/* Do we have to wrap around the buffer end? */
+			if (in_pos + subblock > IN_BUFFER_SIZE) {
+				/* Treat the block between head and buffer end
+				   separately. */
+				subblock = IN_BUFFER_SIZE - in_pos;
+
+				if (rtdm_fd_is_user(fd)) {
+					if (rtdm_copy_to_user
+					    (fd, out_pos,
+					     &ctx->in_buf[in_pos],
+					     subblock) != 0) {
+						ret = -EFAULT;
+						goto break_unlocked;
+					}
+				} else
+					memcpy(out_pos, &ctx->in_buf[in_pos],
+					       subblock);
+
+				read += subblock;
+				out_pos += subblock;
+
+				subblock = block - subblock;
+				in_pos = 0;
+			}
+
+			if (rtdm_fd_is_user(fd)) {
+				if (rtdm_copy_to_user(fd, out_pos,
+						      &ctx->in_buf[in_pos],
+						      subblock) != 0) {
+					ret = -EFAULT;
+					goto break_unlocked;
+				}
+			} else
+				memcpy(out_pos, &ctx->in_buf[in_pos], subblock);
+
+			read += subblock;
+			out_pos += subblock;
+			nbyte -= block;
+
+			rtdm_lock_get_irqsave(&ctx->lock, lock_ctx);
+
+			ctx->in_head =
+			    (ctx->in_head + block) & (IN_BUFFER_SIZE - 1);
+			if ((ctx->in_npend -= block) == 0)
+				ctx->ioc_events &= ~RTSER_EVENT_RXPEND;
+
+			if (nbyte == 0)
+				break; /* All requested bytes read. */
+
+			continue;
+		}
+
+		if (nonblocking)
+			/* ret was set to EAGAIN in case of a real
+			   non-blocking call or contains the error
+			   returned by rtdm_event_timedwait */
+			break;
+
+		ctx->in_nwait = nbyte;
+
+		rtdm_lock_put_irqrestore(&ctx->lock, lock_ctx);
+
+		ret = rtdm_event_timedwait(&ctx->in_event,
+					   ctx->config.rx_timeout,
+					   &timeout_seq);
+		if (ret < 0) {
+			if (ret == -EIDRM) {
+				/* Device has been closed -
+				   return immediately. */
+				return -EBADF;
+			}
+
+			rtdm_lock_get_irqsave(&ctx->lock, lock_ctx);
+
+			nonblocking = 1;
+			if (ctx->in_npend > 0) {
+				/* Final turn: collect pending bytes
+				   before exit. */
+				continue;
+			}
+
+			ctx->in_nwait = 0;
+			break;
+		}
+
+		rtdm_lock_get_irqsave(&ctx->lock, lock_ctx);
+	}
+
+	rtdm_lock_put_irqrestore(&ctx->lock, lock_ctx);
+
+break_unlocked:
+	/* Release the simple reader lock. */
+	clear_bit(0, &ctx->in_lock);
+
+	if ((read > 0) && ((ret == 0) || (ret == -EAGAIN) ||
+			   (ret == -ETIMEDOUT) || (ret == -EINTR)))
+		ret = read;
+
+	return ret;
+}
+
+ssize_t rt_16550_write(struct rtdm_fd *fd, const void *buf, size_t nbyte)
+{
+	struct rt_16550_context *ctx;
+	rtdm_lockctx_t lock_ctx;
+	size_t written = 0;
+	int free;
+	int block;
+	int subblock;
+	int out_pos;
+	int lsr;
+	char *in_pos = (char *)buf;
+	rtdm_toseq_t timeout_seq;
+	ssize_t ret;
+
+	if (nbyte == 0)
+		return 0;
+
+	if (rtdm_fd_is_user(fd) && !rtdm_read_user_ok(fd, buf, nbyte))
+		return -EFAULT;
+
+	ctx = rtdm_fd_to_private(fd);
+
+	rtdm_toseq_init(&timeout_seq, ctx->config.tx_timeout);
+
+	/* Make write operation atomic. */
+	ret = rtdm_mutex_timedlock(&ctx->out_lock, ctx->config.tx_timeout,
+				   &timeout_seq);
+	if (ret)
+		return ret;
+
+	while (nbyte > 0) {
+		rtdm_lock_get_irqsave(&ctx->lock, lock_ctx);
+
+		free = OUT_BUFFER_SIZE - ctx->out_npend;
+
+		if (free > 0) {
+			block = subblock = (nbyte <= free) ? nbyte : free;
+			out_pos = ctx->out_tail;
+
+			rtdm_lock_put_irqrestore(&ctx->lock, lock_ctx);
+
+			/* Do we have to wrap around the buffer end? */
+			if (out_pos + subblock > OUT_BUFFER_SIZE) {
+				/* Treat the block between head and buffer
+				   end separately. */
+				subblock = OUT_BUFFER_SIZE - out_pos;
+
+				if (rtdm_fd_is_user(fd)) {
+					if (rtdm_copy_from_user
+					    (fd,
+					     &ctx->out_buf[out_pos],
+					     in_pos, subblock) != 0) {
+						ret = -EFAULT;
+						break;
+					}
+				} else
+					memcpy(&ctx->out_buf[out_pos], in_pos,
+					       subblock);
+
+				written += subblock;
+				in_pos += subblock;
+
+				subblock = block - subblock;
+				out_pos = 0;
+			}
+
+			if (rtdm_fd_is_user(fd)) {
+				if (rtdm_copy_from_user
+				    (fd, &ctx->out_buf[out_pos],
+				     in_pos, subblock) != 0) {
+					ret = -EFAULT;
+					break;
+				}
+			} else
+				memcpy(&ctx->out_buf[out_pos], in_pos, subblock);
+
+			written += subblock;
+			in_pos += subblock;
+			nbyte -= block;
+
+			rtdm_lock_get_irqsave(&ctx->lock, lock_ctx);
+
+			ctx->out_tail =
+			    (ctx->out_tail + block) & (OUT_BUFFER_SIZE - 1);
+			ctx->out_npend += block;
+
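+			/* If the transmit holding register is already empty,
+			   fill the FIFO right away instead of waiting for the
+			   next TX interrupt. */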
+			lsr = rt_16550_reg_in(rt_16550_io_mode_from_ctx(ctx),
+					      ctx->base_addr, LSR);
+			if (lsr & RTSER_LSR_THR_EMTPY)
+				rt_16550_tx_fill(ctx);
+
+			if (ctx->out_npend > 0 && !(ctx->ier_status & IER_TX)) {
+				/* unmask tx interrupt */
+				ctx->ier_status |= IER_TX;
+				rt_16550_reg_out(rt_16550_io_mode_from_ctx(ctx),
+						 ctx->base_addr, IER,
+						 ctx->ier_status);
+			}
+
+			rtdm_lock_put_irqrestore(&ctx->lock, lock_ctx);
+			continue;
+		}
+
+		rtdm_lock_put_irqrestore(&ctx->lock, lock_ctx);
+
+		ret =
+		    rtdm_event_timedwait(&ctx->out_event,
+					 ctx->config.tx_timeout,
+					 &timeout_seq);
+		if (ret < 0) {
+			if (ret == -EIDRM) {
+				/* Device has been closed -
+				   return immediately. */
+				return -EBADF;
+			}
+			if (ret == -EWOULDBLOCK) {
+				/* Fix error code for non-blocking mode. */
+				ret = -EAGAIN;
+			}
+			break;
+		}
+	}
+
+	rtdm_mutex_unlock(&ctx->out_lock);
+
+	if ((written > 0) && ((ret == 0) || (ret == -EAGAIN) ||
+			      (ret == -ETIMEDOUT) || (ret == -EINTR)))
+		ret = written;
+
+	return ret;
+}
+
+static struct rtdm_driver uart16550A_driver = {
+	.profile_info		= RTDM_PROFILE_INFO(uart16550A,
+						    RTDM_CLASS_SERIAL,
+						    RTDM_SUBCLASS_16550A,
+						    RTSER_PROFILE_VER),
+	.device_flags		= RTDM_NAMED_DEVICE | RTDM_EXCLUSIVE,
+	.device_count		= MAX_DEVICES,
+	.context_size		= sizeof(struct rt_16550_context),
+	.ops = {
+		.open		= rt_16550_open,
+		.close		= rt_16550_close,
+		.ioctl_rt	= rt_16550_ioctl,
+		.ioctl_nrt	= rt_16550_ioctl,
+		.read_rt	= rt_16550_read,
+		.write_rt	= rt_16550_write,
+	},
+};
+
+void rt_16550_exit(void);
+
+int __init rt_16550_init(void)
+{
+	struct rtdm_device *dev;
+	unsigned long base;
+	char *name;
+	int mode;
+	int err;
+	int i;
+
+	if (!rtdm_available())
+		return -ENODEV;
+
+	rt_16550_pnp_init();
+	rt_16550_pci_init();
+
+	for (i = 0; i < MAX_DEVICES; i++) {
+		if (!rt_16550_addr_param(i))
+			continue;
+
+		err = -EINVAL;
+		if (!irq[i] || !rt_16550_addr_param_valid(i))
+			goto cleanup_out;
+
+		dev = kmalloc(sizeof(struct rtdm_device) +
+			      RTDM_MAX_DEVNAME_LEN, GFP_KERNEL);
+		err = -ENOMEM;
+		if (!dev)
+			goto cleanup_out;
+
+		dev->driver = &uart16550A_driver;
+		dev->label = "rtser%d";
+		name = (char *)(dev + 1);
+		ksformat(name, RTDM_MAX_DEVNAME_LEN, dev->label, i);
+
+		err = rt_16550_init_io(i, name);
+		if (err)
+			goto kfree_out;
+
+		if (baud_base[i] == 0)
+			baud_base[i] = DEFAULT_BAUD_BASE;
+
+		if (tx_fifo[i] == 0)
+			tx_fifo[i] = DEFAULT_TX_FIFO;
+
+		/* Mask all UART interrupts and clear pending ones. */
+		base = rt_16550_base_addr(i);
+		mode = rt_16550_io_mode(i);
+		rt_16550_reg_out(mode, base, IER, 0);
+		rt_16550_reg_in(mode, base, IIR);
+		rt_16550_reg_in(mode, base, LSR);
+		rt_16550_reg_in(mode, base, RHR);
+		rt_16550_reg_in(mode, base, MSR);
+
+		err = rtdm_dev_register(dev);
+
+		if (err)
+			goto release_io_out;
+
+		device[i] = dev;
+	}
+
+	return 0;
+
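+	/* Error unwinding: undo only this iteration's setup steps;
+	   rt_16550_exit() also drops the devices registered so far. */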
+      release_io_out:
+	rt_16550_release_io(i);
+
+      kfree_out:
+	kfree(dev);
+
+      cleanup_out:
+	rt_16550_exit();
+
+	return err;
+}
+
+void rt_16550_exit(void)
+{
+	int i;
+
+	for (i = 0; i < MAX_DEVICES; i++)
+		if (device[i]) {
+			rtdm_dev_unregister(device[i]);
+			rt_16550_release_io(i);
+			kfree(device[i]);
+		}
+
+	rt_16550_pci_cleanup();
+	rt_16550_pnp_cleanup();
+}
+
+module_init(rt_16550_init);
+module_exit(rt_16550_exit);
+++ linux-patched/drivers/xenomai/serial/rt_imx_uart.c	2022-03-21 12:58:31.248870834 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/drivers/xenomai/serial/16550A_pnp.h	1970-01-01 01:00:00.000000000 +0100
+/*
+ * Copyright 2012 Wolfgang Grandegger <wg@denx.de>
+ *
+ * Derived from the Linux IMX UART driver (drivers/tty/serial/imx.c)
+ * and 16550A RTserial driver.
+ *
+ * Copyright (C) 2005-2007 Jan Kiszka <jan.kiszka@web.de>.
+ * Copyright (C) 2004 Pengutronix
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/module.h>
+#include <linux/interrupt.h>
+#include <linux/tty.h>
+#include <linux/string.h>
+#include <linux/ioport.h>
+#include <linux/init.h>
+#include <linux/serial.h>
+#include <linux/console.h>
+#include <linux/platform_device.h>
+#include <linux/sysrq.h>
+#include <linux/dma-mapping.h>
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/rational.h>
+#include <linux/io.h>
+#include <asm/irq.h>
+#include <asm/dma.h>
+#include <asm/div64.h>
+#include <linux/platform_data/serial-imx.h>
+#include <linux/slab.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+
+#include <rtdm/serial.h>
+#include <rtdm/driver.h>
+
+MODULE_AUTHOR("Wolfgang Grandegger <wg@denx.de>");
+MODULE_DESCRIPTION("RTDM-based driver for IMX UARTs");
+MODULE_VERSION("1.0.0");
+MODULE_LICENSE("GPL");
+
+#define DRIVER_NAME	"xeno_imx_uart"
+
+/* Register definitions */
+#define URXD0	0x0  /* Receiver Register */
+#define URTX0	0x40 /* Transmitter Register */
+#define UCR1	0x80 /* Control Register 1 */
+#define UCR2	0x84 /* Control Register 2 */
+#define UCR3	0x88 /* Control Register 3 */
+#define UCR4	0x8c /* Control Register 4 */
+#define UFCR	0x90 /* FIFO Control Register */
+#define USR1	0x94 /* Status Register 1 */
+#define USR2	0x98 /* Status Register 2 */
+#define UESC	0x9c /* Escape Character Register */
+#define UTIM	0xa0 /* Escape Timer Register */
+#define UBIR	0xa4 /* BRM Incremental Register */
+#define UBMR	0xa8 /* BRM Modulator Register */
+#define UBRC	0xac /* Baud Rate Count Register */
+#define MX2_ONEMS 0xb0 /* One Millisecond register */
+#define IMX1_UTS 0xd0 /* UART Test Register on i.mx1 */
+#define IMX21_UTS 0xb4 /* UART Test Register on all other i.mx */
+
+
+
+/* UART Control Register Bit Fields.*/
+#define URXD_CHARRDY	(1<<15)
+#define URXD_ERR	(1<<14)
+#define URXD_OVRRUN	(1<<13)
+#define URXD_FRMERR	(1<<12)
+#define URXD_BRK	(1<<11)
+#define URXD_PRERR	(1<<10)
+#define UCR1_ADEN	(1<<15) /* Auto detect interrupt */
+#define UCR1_ADBR	(1<<14) /* Auto detect baud rate */
+#define UCR1_TRDYEN	(1<<13) /* Transmitter ready interrupt enable */
+#define UCR1_IDEN	(1<<12) /* Idle condition interrupt */
+#define UCR1_RRDYEN	(1<<9)	/* Recv ready interrupt enable */
+#define UCR1_RDMAEN	(1<<8)	/* Recv ready DMA enable */
+#define UCR1_IREN	(1<<7)	/* Infrared interface enable */
+#define UCR1_TXMPTYEN	(1<<6)	/* Transmitter empty interrupt enable */
+#define UCR1_RTSDEN	(1<<5)	/* RTS delta interrupt enable */
+#define UCR1_SNDBRK	(1<<4)	/* Send break */
+#define UCR1_TDMAEN	(1<<3)	/* Transmitter ready DMA enable */
+#define MX1_UCR1_UARTCLKEN	(1<<2)	/* UART clock enabled, mx1 only */
+#define UCR1_DOZE	(1<<1)	/* Doze */
+#define UCR1_UARTEN	(1<<0)	/* UART enabled */
+#define UCR2_ESCI	(1<<15) /* Escape seq interrupt enable */
+#define UCR2_IRTS	(1<<14) /* Ignore RTS pin */
+#define UCR2_CTSC	(1<<13) /* CTS pin control */
+#define UCR2_CTS	(1<<12) /* Clear to send */
+#define UCR2_ESCEN	(1<<11) /* Escape enable */
+#define UCR2_PREN	(1<<8)	/* Parity enable */
+#define UCR2_PROE	(1<<7)	/* Parity odd/even */
+#define UCR2_STPB	(1<<6)	/* Stop */
+#define UCR2_WS		(1<<5)	/* Word size */
+#define UCR2_RTSEN	(1<<4)	/* Request to send interrupt enable */
+#define UCR2_ATEN	(1<<3)	/* Aging Timer Enable */
+#define UCR2_TXEN	(1<<2)	/* Transmitter enabled */
+#define UCR2_RXEN	(1<<1)	/* Receiver enabled */
+#define UCR2_SRST	(1<<0)	/* SW reset */
+#define UCR3_DTREN	(1<<13) /* DTR interrupt enable */
+#define UCR3_PARERREN	(1<<12) /* Parity enable */
+#define UCR3_FRAERREN	(1<<11) /* Frame error interrupt enable */
+#define UCR3_DSR	(1<<10) /* Data set ready */
+#define UCR3_DCD	(1<<9)	/* Data carrier detect */
+#define UCR3_RI		(1<<8)	/* Ring indicator */
+#define UCR3_ADNIMP	(1<<7)	/* Autobaud Detection Not Improved */
+#define UCR3_RXDSEN	(1<<6)	/* Receive status interrupt enable */
+#define UCR3_AIRINTEN	(1<<5)	/* Async IR wake interrupt enable */
+#define UCR3_AWAKEN	(1<<4)	/* Async wake interrupt enable */
+#define UCR3_DTRDEN	(1<<3)	/* Data Terminal Ready Delta Enable. */
+#define MX1_UCR3_REF25		(1<<3)	/* Ref freq 25 MHz, only on mx1 */
+#define MX1_UCR3_REF30		(1<<2)	/* Ref Freq 30 MHz, only on mx1 */
+#define MX2_UCR3_RXDMUXSEL	(1<<2)	/* RXD Muxed Input Select, on mx2/mx3 */
+#define UCR3_INVT	(1<<1)	/* Inverted Infrared transmission */
+#define UCR3_BPEN	(1<<0)	/* Preset registers enable */
+#define UCR4_CTSTL_SHF	10	/* CTS trigger level shift */
+#define UCR4_CTSTL_MASK	0x3F	/* CTS trigger is 6 bits wide */
+#define UCR4_INVR	(1<<9)	/* Inverted infrared reception */
+#define UCR4_ENIRI	(1<<8)	/* Serial infrared interrupt enable */
+#define UCR4_WKEN	(1<<7)	/* Wake interrupt enable */
+#define UCR4_REF16	(1<<6)	/* Ref freq 16 MHz */
+#define UCR4_IRSC	(1<<5)	/* IR special case */
+#define UCR4_TCEN	(1<<3)	/* Transmit complete interrupt enable */
+#define UCR4_BKEN	(1<<2)	/* Break condition interrupt enable */
+#define UCR4_OREN	(1<<1)	/* Receiver overrun interrupt enable */
+#define UCR4_DREN	(1<<0)	/* Recv data ready interrupt enable */
+#define UFCR_RXTL_SHF	0	/* Receiver trigger level shift */
+#define UFCR_RFDIV	(7<<7)	/* Reference freq divider mask */
+#define UFCR_RFDIV_REG(x)	(((x) < 7 ? 6 - (x) : 6) << 7)
+#define UFCR_TXTL_SHF	10	/* Transmitter trigger level shift */
+#define UFCR_DCEDTE	(1<<6)
+#define USR1_PARITYERR	(1<<15) /* Parity error interrupt flag */
+#define USR1_RTSS	(1<<14) /* RTS pin status */
+#define USR1_TRDY	(1<<13) /* Transmitter ready interrupt/dma flag */
+#define USR1_RTSD	(1<<12) /* RTS delta */
+#define USR1_ESCF	(1<<11) /* Escape seq interrupt flag */
+#define USR1_FRAMERR	(1<<10) /* Frame error interrupt flag */
+#define USR1_RRDY	(1<<9)	/* Receiver ready interrupt/dma flag */
+#define USR1_AGTIM	(1<<8)	/* Ageing Timer Interrupt Flag */
+#define USR1_DTRD	(1<<7)	/* DTR Delta */
+#define USR1_RXDS	(1<<6)	/* Receiver idle interrupt flag */
+#define USR1_AIRINT	(1<<5)	/* Async IR wake interrupt flag */
+#define USR1_AWAKE	(1<<4)	/* Async wake interrupt flag */
+#define USR2_ADET	(1<<15) /* Auto baud rate detect complete */
+#define USR2_TXFE	(1<<14) /* Transmit buffer FIFO empty */
+#define USR2_DTRF	(1<<13) /* DTR edge interrupt flag */
+#define USR2_IDLE	(1<<12) /* Idle condition */
+#define USR2_RIDELT	(1<<10) /* Ring Indicator Delta */
+#define USR2_RIIN	(1<<9)	/* Ring Indicator Input */
+#define USR2_IRINT	(1<<8)	/* Serial infrared interrupt flag */
+#define USR2_WAKE	(1<<7)	/* Wake */
+#define USR2_DCDDELT	(1<<6)	/* Data Carrier Detect Delta */
+#define USR2_DCDIN	(1<<5)	/* Data Carrier Detect Input */
+#define USR2_RTSF	(1<<4)	/* RTS edge interrupt flag */
+#define USR2_TXDC	(1<<3)	/* Transmitter complete */
+#define USR2_BRCD	(1<<2)	/* Break condition */
+#define USR2_ORE	(1<<1)	/* Overrun error */
+#define USR2_RDR	(1<<0)	/* Recv data ready */
+#define UTS_FRCPERR	(1<<13) /* Force parity error */
+#define UTS_LOOP	(1<<12) /* Loop tx and rx */
+#define UTS_TXEMPTY	(1<<6)	/* TxFIFO empty */
+#define UTS_RXEMPTY	(1<<5)	/* RxFIFO empty */
+#define UTS_TXFULL	(1<<4)	/* TxFIFO full */
+#define UTS_RXFULL	(1<<3)	/* RxFIFO full */
+#define UTS_SOFTRST	(1<<0)	/* Software reset */
+
+#define IN_BUFFER_SIZE		4096
+#define OUT_BUFFER_SIZE		4096
+
+#define TX_FIFO_SIZE		32
+
+#define PARITY_MASK		0x03
+#define DATA_BITS_MASK		0x03
+#define STOP_BITS_MASK		0x01
+#define FIFO_MASK		0xC0
+#define EVENT_MASK		0x0F
+
+#define IER_RX			0x01
+#define IER_TX			0x02
+#define IER_STAT		0x04
+#define IER_MODEM		0x08
+
+#define IMX_ISR_PASS_LIMIT	256
+#define UART_CREAD_BIT		256
+
+#define RT_IMX_UART_MAX		5
+
+static int tx_fifo[RT_IMX_UART_MAX];
+module_param_array(tx_fifo, int, NULL, 0400);
+MODULE_PARM_DESC(tx_fifo, "Transmitter FIFO size");
+
+/* i.MX21 type uart runs on all i.mx except i.MX1 and i.MX6q */
+enum imx_uart_type {
+	IMX1_UART,
+	IMX21_UART,
+	IMX53_UART,
+	IMX6Q_UART,
+};
+
+/* device type dependent stuff */
+struct imx_uart_data {
+	unsigned int uts_reg;
+	enum imx_uart_type devtype;
+};
+
+
+struct rt_imx_uart_port {
+	unsigned char __iomem *membase;	/* read/write[bwl] */
+	resource_size_t mapbase;	/* for ioremap */
+	unsigned int irq;		/* irq number */
+	int tx_fifo;			/* TX fifo size*/
+	unsigned int have_rtscts;
+	unsigned int use_dcedte;
+	unsigned int use_hwflow;
+	struct clk *clk_ipg;		/* clock id for UART clock */
+	struct clk *clk_per;		/* clock id for UART clock */
+	const struct imx_uart_data *devdata;
+	unsigned int uartclk;		/* base uart clock */
+	struct rtdm_device rtdm_dev;	/* RTDM device structure */
+};
+
+
+static struct imx_uart_data imx_uart_devdata[] = {
+	[IMX1_UART] = {
+		.uts_reg = IMX1_UTS,
+		.devtype = IMX1_UART,
+	},
+	[IMX21_UART] = {
+		.uts_reg = IMX21_UTS,
+		.devtype = IMX21_UART,
+	},
+	[IMX53_UART] = {
+		.uts_reg = IMX21_UTS,
+		.devtype = IMX53_UART,
+	},
+	[IMX6Q_UART] = {
+		.uts_reg = IMX21_UTS,
+		.devtype = IMX6Q_UART,
+	},
+};
+
+static const struct platform_device_id rt_imx_uart_id_table[] = {
+	{
+		.name = "imx1-uart",
+		.driver_data = (kernel_ulong_t) &imx_uart_devdata[IMX1_UART],
+	}, {
+		.name = "imx21-uart",
+		.driver_data = (kernel_ulong_t) &imx_uart_devdata[IMX21_UART],
+	}, {
+		.name = "imx53-uart",
+		.driver_data = (kernel_ulong_t) &imx_uart_devdata[IMX53_UART],
+	}, {
+		.name = "imx6q-uart",
+		.driver_data = (kernel_ulong_t) &imx_uart_devdata[IMX6Q_UART],
+	}, {
+		/* sentinel */
+	}
+};
+MODULE_DEVICE_TABLE(platform, rt_imx_uart_id_table);
+
+static const struct of_device_id rt_imx_uart_dt_ids[] = {
+	{
+		.compatible = "fsl,imx6q-uart",
+		.data = &imx_uart_devdata[IMX6Q_UART], },
+	{
+		.compatible = "fsl,imx53-uart",
+		.data = &imx_uart_devdata[IMX53_UART], },
+	{
+		.compatible = "fsl,imx1-uart",
+		.data = &imx_uart_devdata[IMX1_UART], },
+	{
+		.compatible = "fsl,imx21-uart",
+		.data = &imx_uart_devdata[IMX21_UART], },
+	{ /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, rt_imx_uart_dt_ids);
+
+struct rt_imx_uart_ctx {
+	struct rtser_config config;	/* current device configuration */
+
+	rtdm_irq_t irq_handle;		/* device IRQ handle */
+	rtdm_lock_t lock;		/* lock to protect context struct */
+
+	int in_head;			/* RX ring buffer, head pointer */
+	int in_tail;			/* RX ring buffer, tail pointer */
+	size_t in_npend;		/* pending bytes in RX ring */
+	int in_nwait;			/* bytes the user waits for */
+	rtdm_event_t in_event;		/* raised to unblock reader */
+	char in_buf[IN_BUFFER_SIZE];	/* RX ring buffer */
+
+	volatile unsigned long in_lock;	/* single-reader lock */
+	uint64_t *in_history;		/* RX timestamp buffer */
+
+	int out_head;			/* TX ring buffer, head pointer */
+	int out_tail;			/* TX ring buffer, tail pointer */
+	size_t out_npend;		/* pending bytes in TX ring */
+	rtdm_event_t out_event;		/* raised to unblock writer */
+	char out_buf[OUT_BUFFER_SIZE];	/* TX ring buffer */
+	rtdm_mutex_t out_lock;		/* single-writer mutex */
+
+	uint64_t last_timestamp;	/* timestamp of last event */
+	int ioc_events;			/* recorded events */
+	rtdm_event_t ioc_event;		/* raised to unblock event waiter */
+	volatile unsigned long ioc_event_lock;	/* single-waiter lock */
+
+	int ier_status;			/* IER cache */
+	int mcr_status;			/* MCR cache */
+	int status;			/* cache for LSR + soft-states */
+	int saved_errors;		/* error cache for RTIOC_GET_STATUS */
+
+	/*
+	 * The port structure holds all the information about the UART
+	 * port like base address, and so on.
+	 */
+	struct rt_imx_uart_port *port;
+};
+
+static const struct rtser_config default_config = {
+	.config_mask = 0xFFFF,
+	.baud_rate = RTSER_DEF_BAUD,
+	.parity = RTSER_DEF_PARITY,
+	.data_bits = RTSER_DEF_BITS,
+	.stop_bits = RTSER_DEF_STOPB,
+	.handshake = RTSER_DEF_HAND,
+	.fifo_depth = RTSER_DEF_FIFO_DEPTH,
+	.rx_timeout = RTSER_DEF_TIMEOUT,
+	.tx_timeout = RTSER_DEF_TIMEOUT,
+	.event_timeout = RTSER_DEF_TIMEOUT,
+	.timestamp_history = RTSER_DEF_TIMESTAMP_HISTORY,
+	.event_mask = RTSER_DEF_EVENT_MASK,
+};
+
+static void rt_imx_uart_stop_tx(struct rt_imx_uart_ctx *ctx)
+{
+	unsigned long temp;
+
+	temp = readl(ctx->port->membase + UCR1);
+	writel(temp & ~UCR1_TXMPTYEN, ctx->port->membase + UCR1);
+}
+
+static void rt_imx_uart_start_tx(struct rt_imx_uart_ctx *ctx)
+{
+	unsigned long temp;
+
+	temp = readl(ctx->port->membase + UCR1);
+	writel(temp | UCR1_TXMPTYEN, ctx->port->membase + UCR1);
+}
+
+static void rt_imx_uart_enable_ms(struct rt_imx_uart_ctx *ctx)
+{
+	unsigned long ucr3;
+
+	/*
+	 * RTS interrupt is enabled only if we are using interrupt-driven
+	 * software controlled hardware flow control
+	 */
+	if (!ctx->port->use_hwflow) {
+		unsigned long ucr1 = readl(ctx->port->membase + UCR1);
+
+		ucr1 |= UCR1_RTSDEN;
+		writel(ucr1, ctx->port->membase + UCR1);
+	}
+	ucr3 = readl(ctx->port->membase + UCR3);
+	ucr3 |= UCR3_DTREN;
+	if (ctx->port->use_dcedte) /* DTE mode */
+		ucr3 |= UCR3_DCD | UCR3_RI;
+	writel(ucr3, ctx->port->membase + UCR3);
+}
+
+static int rt_imx_uart_rx_chars(struct rt_imx_uart_ctx *ctx,
+				uint64_t *timestamp)
+{
+	unsigned int rx, temp;
+	int rbytes = 0;
+	int lsr = 0;
+
+	while (readl(ctx->port->membase + USR2) & USR2_RDR) {
+		rx = readl(ctx->port->membase + URXD0);
+		temp = readl(ctx->port->membase + USR2);
+		if (temp & USR2_BRCD) {
+			writel(USR2_BRCD, ctx->port->membase + USR2);
+			lsr |= RTSER_LSR_BREAK_IND;
+		}
+
+		if (rx & (URXD_PRERR | URXD_OVRRUN | URXD_FRMERR)) {
+			if (rx & URXD_PRERR)
+				lsr |= RTSER_LSR_PARITY_ERR;
+			else if (rx & URXD_FRMERR)
+				lsr |= RTSER_LSR_FRAMING_ERR;
+			if (rx & URXD_OVRRUN)
+				lsr |= RTSER_LSR_OVERRUN_ERR;
+		}
+
+		/* save received character */
+		ctx->in_buf[ctx->in_tail] = rx & 0xff;
+		if (ctx->in_history)
+			ctx->in_history[ctx->in_tail] = *timestamp;
+		ctx->in_tail = (ctx->in_tail + 1) & (IN_BUFFER_SIZE - 1);
+
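+		/*
+		 * The ring buffer is full: the byte just stored overwrote the
+		 * oldest unread one, so flag a software overrun instead of
+		 * bumping the pending count.
+		 */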
+		if (unlikely(ctx->in_npend >= IN_BUFFER_SIZE))
+			lsr |= RTSER_SOFT_OVERRUN_ERR;
+		else
+			ctx->in_npend++;
+
+		rbytes++;
+	}
+
+	/* save new errors */
+	ctx->status |= lsr;
+
+	return rbytes;
+}
+
+static void rt_imx_uart_tx_chars(struct rt_imx_uart_ctx *ctx)
+{
+	int ch;
+	unsigned int uts_reg = ctx->port->devdata->uts_reg;
+
+	while (ctx->out_npend > 0 &&
+	       !(readl(ctx->port->membase + uts_reg) & UTS_TXFULL)) {
+		ch = ctx->out_buf[ctx->out_head++];
+		writel(ch, ctx->port->membase + URTX0);
+		ctx->out_head &= (OUT_BUFFER_SIZE - 1);
+		ctx->out_npend--;
+	}
+}
+
+static int rt_imx_uart_modem_status(struct rt_imx_uart_ctx *ctx,
+				     unsigned int usr1,
+				     unsigned int usr2)
+{
+	int events = 0;
+
+	/* Clear the status bits that triggered the interrupt */
+	writel(usr1, ctx->port->membase + USR1);
+	writel(usr2, ctx->port->membase + USR2);
+
+	if (ctx->port->use_dcedte) { /* DTE mode */
+		if (usr2 & USR2_DCDDELT)
+			events |= !(usr2 & USR2_DCDIN) ?
+				RTSER_EVENT_MODEMHI : RTSER_EVENT_MODEMLO;
+	}
+	if (!ctx->port->use_hwflow && (usr1 & USR1_RTSD)) {
+		events |= (usr1 & USR1_RTSS) ?
+			RTSER_EVENT_MODEMHI : RTSER_EVENT_MODEMLO;
+	}
+
+	return events;
+}
+
+static int rt_imx_uart_int(rtdm_irq_t *irq_context)
+{
+	uint64_t timestamp = rtdm_clock_read();
+	struct rt_imx_uart_ctx *ctx;
+	unsigned int usr1, usr2, ucr1;
+	int rbytes = 0, events = 0;
+	int ret = RTDM_IRQ_NONE;
+
+	ctx = rtdm_irq_get_arg(irq_context, struct rt_imx_uart_ctx);
+
+	rtdm_lock_get(&ctx->lock);
+
+	usr1 = readl(ctx->port->membase + USR1);
+	usr2 = readl(ctx->port->membase + USR2);
+	ucr1 = readl(ctx->port->membase + UCR1);
+
+	/*
+	 * Read if there is data available
+	 */
+	if (usr1 & USR1_RRDY) {
+		if (likely(ucr1 & UCR1_RRDYEN)) {
+			rbytes = rt_imx_uart_rx_chars(ctx, &timestamp);
+			events |= RTSER_EVENT_RXPEND;
+		}
+		ret = RTDM_IRQ_HANDLED;
+	}
+
+	/*
+	 * Send data if there is data to be sent
+	 */
+	if (usr1 & USR1_TRDY) {
+		if (likely(ucr1 & UCR1_TXMPTYEN))
+			rt_imx_uart_tx_chars(ctx);
+		ret = RTDM_IRQ_HANDLED;
+	}
+
+	/*
+	 * Handle modem status events
+	 */
+	if ((usr1 & (USR1_RTSD | USR1_DTRD)) ||
+	    (usr2 & (USR2_DCDDELT | USR2_RIDELT))) {
+		events |= rt_imx_uart_modem_status(ctx, usr1, usr2);
+		ret = RTDM_IRQ_HANDLED;
+	}
+
+	if (ctx->in_nwait > 0) {
+		if ((ctx->in_nwait <= rbytes) || ctx->status) {
+			ctx->in_nwait = 0;
+			rtdm_event_signal(&ctx->in_event);
+		} else {
+			ctx->in_nwait -= rbytes;
+		}
+	}
+
+	if (ctx->status) {
+		events |= RTSER_EVENT_ERRPEND;
+#ifdef FIXME
+		ctx->ier_status &= ~IER_STAT;
+#endif
+	}
+
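+	/*
+	 * Wake the event waiter only on a 0 -> non-zero transition of
+	 * ioc_events, so it is signalled at most once per batch.
+	 */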
+	if (events & ctx->config.event_mask) {
+		int old_events = ctx->ioc_events;
+
+		ctx->last_timestamp = timestamp;
+		ctx->ioc_events = events;
+
+		if (!old_events)
+			rtdm_event_signal(&ctx->ioc_event);
+	}
+
+	if ((ctx->ier_status & IER_TX) && (ctx->out_npend == 0)) {
+		rt_imx_uart_stop_tx(ctx);
+		ctx->ier_status &= ~IER_TX;
+		rtdm_event_signal(&ctx->out_event);
+	}
+
+	rtdm_lock_put(&ctx->lock);
+
+	if (ret != RTDM_IRQ_HANDLED)
+		pr_warn("%s: unhandled interrupt\n", __func__);
+	return ret;
+}
+
+static unsigned int rt_imx_uart_get_msr(struct rt_imx_uart_ctx *ctx)
+{
+	unsigned long usr1 = readl(ctx->port->membase + USR1);
+	unsigned long usr2 = readl(ctx->port->membase + USR2);
+	unsigned int msr = 0;
+
+	if (usr1 & USR1_RTSD)
+		msr |= RTSER_MSR_DCTS;
+	if (usr1 & USR1_DTRD)
+		msr |= RTSER_MSR_DDSR;
+	if (usr2 & USR2_RIDELT)
+		msr |= RTSER_MSR_TERI;
+	if (usr2 & USR2_DCDDELT)
+		msr |= RTSER_MSR_DDCD;
+
+	if (usr1 & USR1_RTSS)
+		msr |= RTSER_MSR_CTS;
+
+	if (ctx->port->use_dcedte) { /* DTE mode */
+		if (!(usr2 & USR2_DCDIN))
+			msr |= RTSER_MSR_DCD;
+		if (!(usr2 & USR2_RIIN))
+			msr |= RTSER_MSR_RI;
+	}
+
+	return msr;
+}
+
+static void rt_imx_uart_set_mcr(struct rt_imx_uart_ctx *ctx,
+				unsigned int mcr)
+{
+	unsigned int uts_reg = ctx->port->devdata->uts_reg;
+	unsigned long ucr2 = readl(ctx->port->membase + UCR2);
+	unsigned long ucr3 = readl(ctx->port->membase + UCR3);
+	unsigned long uts = readl(ctx->port->membase + uts_reg);
+
+	if (mcr & RTSER_MCR_RTS) {
+		/*
+		 * Return to hardware-driven hardware flow control if the
+		 * option is enabled
+		 */
+		if (ctx->port->use_hwflow) {
+			ucr2 |= UCR2_CTSC;
+		} else {
+			ucr2 |= UCR2_CTS;
+			ucr2 &= ~UCR2_CTSC;
+		}
+	} else {
+		ucr2 &= ~(UCR2_CTS | UCR2_CTSC);
+	}
+	writel(ucr2, ctx->port->membase + UCR2);
+
+	if (mcr & RTSER_MCR_DTR)
+		ucr3 |= UCR3_DSR;
+	else
+		ucr3 &= ~UCR3_DSR;
+	writel(ucr3, ctx->port->membase + UCR3);
+
+	if (mcr & RTSER_MCR_LOOP)
+		uts |= UTS_LOOP;
+	else
+		uts &= ~UTS_LOOP;
+	writel(uts, ctx->port->membase + uts_reg);
+}
+
+static void rt_imx_uart_break_ctl(struct rt_imx_uart_ctx *ctx,
+				  int break_state)
+{
+	unsigned long ucr1 = readl(ctx->port->membase + UCR1);
+
+	if (break_state == RTSER_BREAK_SET)
+		ucr1 |= UCR1_SNDBRK;
+	else
+		ucr1 &= ~UCR1_SNDBRK;
+	writel(ucr1, ctx->port->membase + UCR1);
+}
+
+static int rt_imx_uart_set_config(struct rt_imx_uart_ctx *ctx,
+				  const struct rtser_config *config,
+				  uint64_t **in_history_ptr)
+{
+	rtdm_lockctx_t lock_ctx;
+	int err = 0;
+
+	rtdm_lock_get_irqsave(&ctx->lock, lock_ctx);
+
+	if (config->config_mask & RTSER_SET_BAUD)
+		ctx->config.baud_rate = config->baud_rate;
+	if (config->config_mask & RTSER_SET_DATA_BITS)
+		ctx->config.data_bits = config->data_bits & DATA_BITS_MASK;
+	if (config->config_mask & RTSER_SET_PARITY)
+		ctx->config.parity = config->parity & PARITY_MASK;
+	if (config->config_mask & RTSER_SET_STOP_BITS)
+		ctx->config.stop_bits = config->stop_bits & STOP_BITS_MASK;
+
+	/* Timeout manipulation is not atomic. The user is supposed to take
+	 * care not to use and change timeouts at the same time.
+	 */
+	if (config->config_mask & RTSER_SET_TIMEOUT_RX)
+		ctx->config.rx_timeout = config->rx_timeout;
+	if (config->config_mask & RTSER_SET_TIMEOUT_TX)
+		ctx->config.tx_timeout = config->tx_timeout;
+	if (config->config_mask & RTSER_SET_TIMEOUT_EVENT)
+		ctx->config.event_timeout = config->event_timeout;
+
+	if (config->config_mask & RTSER_SET_TIMESTAMP_HISTORY) {
+		if (config->timestamp_history & RTSER_RX_TIMESTAMP_HISTORY) {
+			if (!ctx->in_history) {
+				ctx->in_history = *in_history_ptr;
+				*in_history_ptr = NULL;
+				if (!ctx->in_history)
+					err = -ENOMEM;
+			}
+		} else {
+			*in_history_ptr = ctx->in_history;
+			ctx->in_history = NULL;
+		}
+	}
+
+	if (config->config_mask & RTSER_SET_EVENT_MASK) {
+		ctx->config.event_mask = config->event_mask & EVENT_MASK;
+		ctx->ioc_events = 0;
+
+		if ((config->event_mask & RTSER_EVENT_RXPEND) &&
+		    (ctx->in_npend > 0))
+			ctx->ioc_events |= RTSER_EVENT_RXPEND;
+
+		if ((config->event_mask & RTSER_EVENT_ERRPEND)
+		    && ctx->status)
+			ctx->ioc_events |= RTSER_EVENT_ERRPEND;
+	}
+
+	if (config->config_mask & RTSER_SET_HANDSHAKE) {
+		ctx->config.handshake = config->handshake;
+
+		switch (ctx->config.handshake) {
+		case RTSER_RTSCTS_HAND:
+			/* ...? */
+
+		default:	/* RTSER_NO_HAND */
+			ctx->mcr_status = RTSER_MCR_RTS | RTSER_MCR_OUT1;
+			break;
+		}
+		rt_imx_uart_set_mcr(ctx, ctx->mcr_status);
+	}
+
+	/* configure hardware with new parameters */
+	if (config->config_mask & (RTSER_SET_BAUD |
+				   RTSER_SET_PARITY |
+				   RTSER_SET_DATA_BITS |
+				   RTSER_SET_STOP_BITS |
+				   RTSER_SET_EVENT_MASK |
+				   RTSER_SET_HANDSHAKE)) {
+		struct rt_imx_uart_port *port = ctx->port;
+		unsigned int ucr2, old_ucr1, old_txrxen, old_ucr2;
+		unsigned int baud = ctx->config.baud_rate;
+		unsigned int div, ufcr;
+		unsigned long num, denom;
+		uint64_t tdiv64;
+
+		if (ctx->config.data_bits == RTSER_8_BITS)
+			ucr2 = UCR2_WS | UCR2_SRST | UCR2_IRTS;
+		else
+			ucr2 = UCR2_SRST | UCR2_IRTS;
+
+		if (ctx->config.handshake == RTSER_RTSCTS_HAND) {
+			if (port->have_rtscts) {
+				ucr2 &= ~UCR2_IRTS;
+				ucr2 |= UCR2_CTSC;
+			}
+		}
+
+		if (ctx->config.stop_bits == RTSER_2_STOPB)
+			ucr2 |= UCR2_STPB;
+		if (ctx->config.parity == RTSER_ODD_PARITY ||
+		    ctx->config.parity == RTSER_EVEN_PARITY) {
+			ucr2 |= UCR2_PREN;
+			if (ctx->config.parity == RTSER_ODD_PARITY)
+				ucr2 |= UCR2_PROE;
+		}
+
+		/*
+		 * disable interrupts and drain transmitter
+		 */
+		old_ucr1 = readl(port->membase + UCR1);
+		old_ucr1 &= ~UCR1_RTSDEN; /* reset in rt_imx_uart_enable_ms() */
+		writel(old_ucr1 & ~(UCR1_TXMPTYEN | UCR1_RRDYEN),
+		       port->membase + UCR1);
+		old_ucr2 = readl(port->membase + UCR2);
+		writel(old_ucr2 & ~UCR2_ATEN, port->membase + UCR2);
+		while (!(readl(port->membase + USR2) & USR2_TXDC))
+			barrier();
+
+		/* then, disable everything */
+		old_txrxen = readl(port->membase + UCR2);
+		writel(old_txrxen & ~(UCR2_TXEN | UCR2_RXEN),
+		       port->membase + UCR2);
+		old_txrxen &= (UCR2_TXEN | UCR2_RXEN);
+		div = port->uartclk / (baud * 16);
+		if (div > 7)
+			div = 7;
+		if (!div)
+			div = 1;
+
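+		/*
+		 * The BRM registers take the best rational approximation of
+		 * 16 * div * baud / uartclk; UBIR/UBMR are written minus one
+		 * below, as the hardware expects.
+		 */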
+		rational_best_approximation(16 * div * baud, port->uartclk,
+					    1 << 16, 1 << 16, &num, &denom);
+
+		tdiv64 = port->uartclk;
+		tdiv64 *= num;
+		do_div(tdiv64, denom * 16 * div);
+
+		num -= 1;
+		denom -= 1;
+
+		ufcr = readl(port->membase + UFCR);
+		ufcr = (ufcr & (~UFCR_RFDIV)) | UFCR_RFDIV_REG(div);
+
+		if (port->use_dcedte)
+			ufcr |= UFCR_DCEDTE;
+
+		writel(ufcr, port->membase + UFCR);
+
+		writel(num, port->membase + UBIR);
+		writel(denom, port->membase + UBMR);
+
+		writel(port->uartclk / div / 1000, port->membase + MX2_ONEMS);
+
+		writel(old_ucr1, port->membase + UCR1);
+
+		/* set the parity, stop bits and data size */
+		writel(ucr2 | old_txrxen, port->membase + UCR2);
+
+		if (config->event_mask &
+		    (RTSER_EVENT_MODEMHI | RTSER_EVENT_MODEMLO))
+			rt_imx_uart_enable_ms(ctx);
+
+		ctx->status = 0;
+		ctx->ioc_events &= ~RTSER_EVENT_ERRPEND;
+	}
+
+	rtdm_lock_put_irqrestore(&ctx->lock, lock_ctx);
+
+	return err;
+}
+
+void rt_imx_uart_cleanup_ctx(struct rt_imx_uart_ctx *ctx)
+{
+	rtdm_event_destroy(&ctx->in_event);
+	rtdm_event_destroy(&ctx->out_event);
+	rtdm_event_destroy(&ctx->ioc_event);
+	rtdm_mutex_destroy(&ctx->out_lock);
+}
+
+#define TXTL 2 /* reset default */
+#define RXTL 1 /* reset default */
+
+static int rt_imx_uart_setup_ufcr(struct rt_imx_uart_port *port)
+{
+	unsigned int val;
+	unsigned int ufcr_rfdiv;
+
+	/* set receiver / transmitter trigger level.
+	 * RFDIV is set such way to satisfy requested uartclk value
+	 */
+	val = TXTL << 10 | RXTL;
+	ufcr_rfdiv = (clk_get_rate(port->clk_per) + port->uartclk / 2) /
+		port->uartclk;
+
+	if (!ufcr_rfdiv)
+		ufcr_rfdiv = 1;
+
+	val |= UFCR_RFDIV_REG(ufcr_rfdiv);
+
+	writel(val, port->membase + UFCR);
+
+	return 0;
+}
+
+/* half the RX buffer size */
+#define CTSTL 16
+
+static void uart_reset(struct rt_imx_uart_port *port)
+{
+	unsigned int uts_reg = port->devdata->uts_reg;
+	int n = 100;
+	u32 temp;
+
+	/* Reset fifo's and state machines */
+	temp = readl(port->membase + UCR2);
+	temp &= ~UCR2_SRST;
+	writel(temp, port->membase + UCR2);
+	n = 100;
+	while (!(readl(port->membase + uts_reg) & UTS_SOFTRST) && --n > 0)
+		udelay(1);
+}
+
+static int rt_imx_uart_open(struct rtdm_fd *fd, int oflags)
+{
+	struct rt_imx_uart_ctx *ctx;
+	struct rt_imx_uart_port *port;
+	rtdm_lockctx_t lock_ctx;
+	unsigned long temp;
+	uint64_t *dummy;
+
+	ctx = rtdm_fd_to_private(fd);
+	ctx->port = (struct rt_imx_uart_port *)rtdm_fd_device(fd)->device_data;
+
+	port = ctx->port;
+
+	/* IPC initialisation - cannot fail with used parameters */
+	rtdm_lock_init(&ctx->lock);
+	rtdm_event_init(&ctx->in_event, 0);
+	rtdm_event_init(&ctx->out_event, 0);
+	rtdm_event_init(&ctx->ioc_event, 0);
+	rtdm_mutex_init(&ctx->out_lock);
+
+	ctx->in_head = 0;
+	ctx->in_tail = 0;
+	ctx->in_npend = 0;
+	ctx->in_nwait = 0;
+	ctx->in_lock = 0;
+	ctx->in_history = NULL;
+
+	ctx->out_head = 0;
+	ctx->out_tail = 0;
+	ctx->out_npend = 0;
+
+	ctx->ioc_events = 0;
+	ctx->ioc_event_lock = 0;
+	ctx->status = 0;
+	ctx->saved_errors = 0;
+
+	/*
+	 * disable the DREN bit (Data Ready interrupt enable) before
+	 * requesting IRQs
+	 */
+	temp = readl(port->membase + UCR4);
+
+	/* set the trigger level for CTS */
+	temp &= ~(UCR4_CTSTL_MASK << UCR4_CTSTL_SHF);
+	temp |= CTSTL << UCR4_CTSTL_SHF;
+	writel(temp & ~UCR4_DREN, port->membase + UCR4);
+
+	uart_reset(port);
+
+	rtdm_lock_get_irqsave(&ctx->lock, lock_ctx);
+
+	/*
+	 * Finally, clear status and enable interrupts
+	 */
+	writel(USR1_RTSD | USR1_DTRD, port->membase + USR1);
+	writel(USR2_ORE, port->membase + USR2);
+
+	temp = readl(port->membase + UCR1) & ~UCR1_RRDYEN;
+	temp |= UCR1_UARTEN;
+	if (port->have_rtscts)
+		temp |= UCR1_RTSDEN;
+	writel(temp, port->membase + UCR1);
+
+	temp = readl(port->membase + UCR4);
+	temp |= UCR4_OREN;
+	writel(temp, port->membase + UCR4);
+
+	temp = readl(port->membase + UCR2) & ~(UCR2_ATEN|UCR2_RTSEN);
+	temp |= (UCR2_RXEN | UCR2_TXEN);
+	if (!port->have_rtscts)
+		temp |= UCR2_IRTS;
+	writel(temp, port->membase + UCR2);
+
+	temp = readl(port->membase + UCR3);
+	temp |= MX2_UCR3_RXDMUXSEL;
+	writel(temp, port->membase + UCR3);
+
+	temp = readl(port->membase + UCR1);
+	temp |= UCR1_RRDYEN;
+	writel(temp, port->membase + UCR1);
+
+	temp = readl(port->membase + UCR2);
+	temp |= UCR2_ATEN;
+	writel(temp, port->membase + UCR2);
+
+	rtdm_lock_put_irqrestore(&ctx->lock, lock_ctx);
+
+	rt_imx_uart_set_config(ctx, &default_config, &dummy);
+
+	rt_imx_uart_setup_ufcr(port);
+
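+	/*
+	 * Request the IRQ last: the handler may run as soon as registration
+	 * succeeds, so the UART must already be fully set up.
+	 */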
+	return rtdm_irq_request(&ctx->irq_handle,
+				port->irq, rt_imx_uart_int, 0,
+				rtdm_fd_device(fd)->name, ctx);
+}
+
+void rt_imx_uart_close(struct rtdm_fd *fd)
+{
+	struct rt_imx_uart_port *port;
+	struct rt_imx_uart_ctx *ctx;
+	rtdm_lockctx_t lock_ctx;
+	unsigned long temp;
+
+	ctx = rtdm_fd_to_private(fd);
+	port = ctx->port;
+
+	rtdm_lock_get_irqsave(&ctx->lock, lock_ctx);
+
+	temp = readl(port->membase + UCR2);
+	temp &= ~(UCR2_ATEN|UCR2_RTSEN|UCR2_RXEN|UCR2_TXEN|UCR2_IRTS);
+	writel(temp, port->membase + UCR2);
+	/*
+	 * Disable all interrupts, port and break condition, then
+	 * reset.
+	 */
+	temp = readl(port->membase + UCR1);
+	temp &= ~(UCR1_TXMPTYEN | UCR1_RRDYEN | UCR1_RTSDEN | UCR1_UARTEN);
+	writel(temp, port->membase + UCR1);
+
+	rtdm_lock_put_irqrestore(&ctx->lock, lock_ctx);
+
+	rtdm_irq_free(&ctx->irq_handle);
+
+	uart_reset(port);
+
+	rt_imx_uart_cleanup_ctx(ctx);
+	kfree(ctx->in_history);
+}
+
+static int rt_imx_uart_ioctl(struct rtdm_fd *fd,
+			     unsigned int request, void *arg)
+{
+	rtdm_lockctx_t lock_ctx;
+	struct rt_imx_uart_ctx *ctx;
+	int err = 0;
+
+	ctx = rtdm_fd_to_private(fd);
+
+	switch (request) {
+	case RTSER_RTIOC_GET_CONFIG:
+		if (rtdm_fd_is_user(fd))
+			err =
+			    rtdm_safe_copy_to_user(fd, arg,
+						   &ctx->config,
+						   sizeof(struct rtser_config));
+		else
+			memcpy(arg, &ctx->config,
+			       sizeof(struct rtser_config));
+		break;
+
+	case RTSER_RTIOC_SET_CONFIG: {
+		struct rtser_config *config;
+		struct rtser_config config_buf;
+		uint64_t *hist_buf = NULL;
+
+		/*
+		 * We may call regular kernel services ahead, ask for
+		 * re-entering secondary mode if need be.
+		 */
+		if (rtdm_in_rt_context())
+			return -ENOSYS;
+
+		config = (struct rtser_config *)arg;
+
+		if (rtdm_fd_is_user(fd)) {
+			err =
+			    rtdm_safe_copy_from_user(fd, &config_buf,
+						     arg,
+						     sizeof(struct
+							    rtser_config));
+			if (err)
+				return err;
+
+			config = &config_buf;
+		}
+
+		if ((config->config_mask & RTSER_SET_BAUD) &&
+		    (config->baud_rate > clk_get_rate(ctx->port->clk_per) / 16 ||
+		     config->baud_rate <= 0))
+			/* invalid baudrate for this port */
+			return -EINVAL;
+
+		if (config->config_mask & RTSER_SET_TIMESTAMP_HISTORY) {
+			if (config->timestamp_history &
+						RTSER_RX_TIMESTAMP_HISTORY)
+				hist_buf = kmalloc(IN_BUFFER_SIZE *
+						   sizeof(nanosecs_abs_t),
+						   GFP_KERNEL);
+		}
+
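+		/*
+		 * set_config() either takes ownership of hist_buf (clearing
+		 * the pointer) or hands back a buffer that is no longer
+		 * needed; the conditional kfree() below covers both cases.
+		 */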
+		rt_imx_uart_set_config(ctx, config, &hist_buf);
+
+		if (hist_buf)
+			kfree(hist_buf);
+		break;
+	}
+
+	case RTSER_RTIOC_GET_STATUS: {
+		int status, msr;
+
+		rtdm_lock_get_irqsave(&ctx->lock, lock_ctx);
+
+		status = ctx->saved_errors | ctx->status;
+		ctx->status = 0;
+		ctx->saved_errors = 0;
+		ctx->ioc_events &= ~RTSER_EVENT_ERRPEND;
+
+		msr = rt_imx_uart_get_msr(ctx);
+
+		rtdm_lock_put_irqrestore(&ctx->lock, lock_ctx);
+
+		if (rtdm_fd_is_user(fd)) {
+			struct rtser_status status_buf;
+
+
+			status_buf.line_status = status;
+			status_buf.modem_status = msr;
+			err =
+			    rtdm_safe_copy_to_user(fd, arg,
+						   &status_buf,
+						   sizeof(struct
+							  rtser_status));
+		} else {
+			((struct rtser_status *)arg)->line_status = status;
+			((struct rtser_status *)arg)->modem_status = msr;
+		}
+		break;
+	}
+
+	case RTSER_RTIOC_GET_CONTROL:
+		if (rtdm_fd_is_user(fd))
+			err =
+			    rtdm_safe_copy_to_user(fd, arg,
+						   &ctx->mcr_status,
+						   sizeof(int));
+		else
+			*(int *)arg = ctx->mcr_status;
+
+		break;
+
+	case RTSER_RTIOC_SET_CONTROL: {
+		int new_mcr = (long)arg;
+
+		rtdm_lock_get_irqsave(&ctx->lock, lock_ctx);
+		ctx->mcr_status = new_mcr;
+		rt_imx_uart_set_mcr(ctx, new_mcr);
+		rtdm_lock_put_irqrestore(&ctx->lock, lock_ctx);
+		break;
+	}
+
+	case RTSER_RTIOC_WAIT_EVENT: {
+		struct rtser_event ev = { .rxpend_timestamp = 0 };
+		rtdm_toseq_t timeout_seq;
+
+		if (!rtdm_in_rt_context())
+			return -ENOSYS;
+
+		/* Only one waiter allowed, stop any further attempts here. */
+		if (test_and_set_bit(0, &ctx->ioc_event_lock))
+			return -EBUSY;
+
+		rtdm_toseq_init(&timeout_seq, ctx->config.event_timeout);
+
+		rtdm_lock_get_irqsave(&ctx->lock, lock_ctx);
+
+		while (!ctx->ioc_events) {
+			/* Only enable error interrupt
+			 * when the user waits for it.
+			 */
+			if (ctx->config.event_mask & RTSER_EVENT_ERRPEND) {
+				ctx->ier_status |= IER_STAT;
+#ifdef FIXME
+				rt_imx_uart_reg_out(mode, base, IER,
+						 ctx->ier_status);
+#endif
+			}
+
+			rtdm_lock_put_irqrestore(&ctx->lock, lock_ctx);
+
+			err = rtdm_event_timedwait(&ctx->ioc_event,
+						   ctx->config.event_timeout,
+						   &timeout_seq);
+			if (err) {
+				/* Device has been closed? */
+				if (err == -EIDRM)
+					err = -EBADF;
+				goto wait_unlock_out;
+			}
+
+			rtdm_lock_get_irqsave(&ctx->lock, lock_ctx);
+		}
+
+		ev.events = ctx->ioc_events;
+		ctx->ioc_events &=
+		    ~(RTSER_EVENT_MODEMHI | RTSER_EVENT_MODEMLO);
+
+		ev.last_timestamp = ctx->last_timestamp;
+		ev.rx_pending = ctx->in_npend;
+
+		if (ctx->in_history)
+			ev.rxpend_timestamp = ctx->in_history[ctx->in_head];
+
+		rtdm_lock_put_irqrestore(&ctx->lock, lock_ctx);
+
+		if (rtdm_fd_is_user(fd))
+			err = rtdm_safe_copy_to_user(fd, arg, &ev,
+						     sizeof(struct rtser_event));
+		else
+			memcpy(arg, &ev, sizeof(struct rtser_event));
+
+wait_unlock_out:
+		/* release the simple event waiter lock */
+		clear_bit(0, &ctx->ioc_event_lock);
+		break;
+	}
+
+	case RTSER_RTIOC_BREAK_CTL: {
+		rtdm_lock_get_irqsave(&ctx->lock, lock_ctx);
+		rt_imx_uart_break_ctl(ctx, (unsigned long)arg);
+		rtdm_lock_put_irqrestore(&ctx->lock, lock_ctx);
+		break;
+	}
+
+#ifdef FIXME
+	case RTIOC_PURGE: {
+		int fcr = 0;
+
+		rtdm_lock_get_irqsave(&ctx->lock, lock_ctx);
+		if ((long)arg & RTDM_PURGE_RX_BUFFER) {
+			ctx->in_head = 0;
+			ctx->in_tail = 0;
+			ctx->in_npend = 0;
+			ctx->status = 0;
+			fcr |= FCR_FIFO | FCR_RESET_RX;
+			rt_imx_uart_reg_in(mode, base, RHR);
+		}
+		if ((long)arg & RTDM_PURGE_TX_BUFFER) {
+			ctx->out_head = 0;
+			ctx->out_tail = 0;
+			ctx->out_npend = 0;
+			fcr |= FCR_FIFO | FCR_RESET_TX;
+		}
+		if (fcr) {
+			rt_imx_uart_reg_out(mode, base, FCR, fcr);
+			rt_imx_uart_reg_out(mode, base, FCR,
+					 FCR_FIFO | ctx->config.fifo_depth);
+		}
+		rtdm_lock_put_irqrestore(&ctx->lock, lock_ctx);
+		break;
+	}
+#endif
+
+	default:
+		err = -ENOTTY;
+	}
+
+	return err;
+}
+
+ssize_t rt_imx_uart_read(struct rtdm_fd *fd, void *buf, size_t nbyte)
+{
+	struct rt_imx_uart_ctx *ctx;
+	rtdm_lockctx_t lock_ctx;
+	size_t read = 0;
+	int pending;
+	int block;
+	int subblock;
+	int in_pos;
+	char *out_pos = (char *)buf;
+	rtdm_toseq_t timeout_seq;
+	ssize_t ret = -EAGAIN;	/* for non-blocking read */
+	int nonblocking;
+
+	if (nbyte == 0)
+		return 0;
+
+	if (rtdm_fd_is_user(fd) && !rtdm_rw_user_ok(fd, buf, nbyte))
+		return -EFAULT;
+
+	ctx = rtdm_fd_to_private(fd);
+
+	rtdm_toseq_init(&timeout_seq, ctx->config.rx_timeout);
+
+	/* non-blocking is handled separately here */
+	nonblocking = (ctx->config.rx_timeout < 0);
+
+	/* only one reader allowed, stop any further attempts here */
+	if (test_and_set_bit(0, &ctx->in_lock))
+		return -EBUSY;
+
+	rtdm_lock_get_irqsave(&ctx->lock, lock_ctx);
+
+	while (1) {
+		if (ctx->status) {
+			if (ctx->status & RTSER_LSR_BREAK_IND)
+				ret = -EPIPE;
+			else
+				ret = -EIO;
+			ctx->saved_errors = ctx->status &
+			    (RTSER_LSR_OVERRUN_ERR | RTSER_LSR_PARITY_ERR |
+			     RTSER_LSR_FRAMING_ERR | RTSER_SOFT_OVERRUN_ERR);
+			ctx->status = 0;
+			break;
+		}
+
+		pending = ctx->in_npend;
+
+		if (pending > 0) {
+			block = subblock = (pending <= nbyte) ? pending : nbyte;
+			in_pos = ctx->in_head;
+
+			rtdm_lock_put_irqrestore(&ctx->lock, lock_ctx);
+
+			/* Do we have to wrap around the buffer end? */
+			if (in_pos + subblock > IN_BUFFER_SIZE) {
+				/* Treat the block between head and buffer end
+				 * separately.
+				 */
+				subblock = IN_BUFFER_SIZE - in_pos;
+
+				if (rtdm_fd_is_user(fd)) {
+					if (rtdm_copy_to_user
+					    (fd, out_pos,
+					     &ctx->in_buf[in_pos],
+					     subblock) != 0) {
+						ret = -EFAULT;
+						goto break_unlocked;
+					}
+				} else
+					memcpy(out_pos, &ctx->in_buf[in_pos],
+					       subblock);
+
+				read += subblock;
+				out_pos += subblock;
+
+				subblock = block - subblock;
+				in_pos = 0;
+			}
+
+			if (rtdm_fd_is_user(fd)) {
+				if (rtdm_copy_to_user(fd, out_pos,
+						      &ctx->in_buf[in_pos],
+						      subblock) != 0) {
+					ret = -EFAULT;
+					goto break_unlocked;
+				}
+			} else
+				memcpy(out_pos, &ctx->in_buf[in_pos], subblock);
+
+			read += subblock;
+			out_pos += subblock;
+			nbyte -= block;
+
+			rtdm_lock_get_irqsave(&ctx->lock, lock_ctx);
+
+			ctx->in_head =
+			    (ctx->in_head + block) & (IN_BUFFER_SIZE - 1);
+			ctx->in_npend -= block;
+			if (ctx->in_npend == 0)
+				ctx->ioc_events &= ~RTSER_EVENT_RXPEND;
+
+			if (nbyte == 0)
+				break; /* All requested bytes read. */
+
+			continue;
+		}
+
+		if (nonblocking)
+			/* ret was set to EAGAIN in case of a real
+			 * non-blocking call or contains the error
+			 * returned by rtdm_event_timedwait
+			 */
+			break;
+
+		ctx->in_nwait = nbyte;
+
+		rtdm_lock_put_irqrestore(&ctx->lock, lock_ctx);
+
+		ret = rtdm_event_timedwait(&ctx->in_event,
+					   ctx->config.rx_timeout,
+					   &timeout_seq);
+		if (ret < 0) {
+			if (ret == -EIDRM) {
+				/* Device has been closed -
+				 * return immediately.
+				 */
+				return -EBADF;
+			}
+
+			rtdm_lock_get_irqsave(&ctx->lock, lock_ctx);
+
+			nonblocking = 1;
+			if (ctx->in_npend > 0) {
+				/* Final turn: collect pending bytes
+				 * before exit.
+				 */
+				continue;
+			}
+
+			ctx->in_nwait = 0;
+			break;
+		}
+
+		rtdm_lock_get_irqsave(&ctx->lock, lock_ctx);
+	}
+
+	rtdm_lock_put_irqrestore(&ctx->lock, lock_ctx);
+
+break_unlocked:
+	/* Release the simple reader lock. */
+	clear_bit(0, &ctx->in_lock);
+
+	if ((read > 0) && ((ret == 0) || (ret == -EAGAIN) ||
+			   (ret == -ETIMEDOUT)))
+		ret = read;
+
+	return ret;
+}
+
+static ssize_t rt_imx_uart_write(struct rtdm_fd *fd, const void *buf,
+				size_t nbyte)
+{
+	struct rt_imx_uart_ctx *ctx;
+	rtdm_lockctx_t lock_ctx;
+	size_t written = 0;
+	int free;
+	int block;
+	int subblock;
+	int out_pos;
+	char *in_pos = (char *)buf;
+	rtdm_toseq_t timeout_seq;
+	ssize_t ret;
+
+	if (nbyte == 0)
+		return 0;
+
+	if (rtdm_fd_is_user(fd) && !rtdm_read_user_ok(fd, buf, nbyte))
+		return -EFAULT;
+
+	ctx = rtdm_fd_to_private(fd);
+
+	rtdm_toseq_init(&timeout_seq, ctx->config.tx_timeout);
+
+	/* Make write operation atomic. */
+	ret = rtdm_mutex_timedlock(&ctx->out_lock, ctx->config.tx_timeout,
+				   &timeout_seq);
+	if (ret)
+		return ret;
+
+	while (nbyte > 0) {
+		rtdm_lock_get_irqsave(&ctx->lock, lock_ctx);
+
+		free = OUT_BUFFER_SIZE - ctx->out_npend;
+
+		if (free > 0) {
+			block = subblock = (nbyte <= free) ? nbyte : free;
+			out_pos = ctx->out_tail;
+
+			rtdm_lock_put_irqrestore(&ctx->lock, lock_ctx);
+
+			/* Do we have to wrap around the buffer end? */
+			if (out_pos + subblock > OUT_BUFFER_SIZE) {
+				/* Treat the block between head and buffer
+				 * end separately.
+				 */
+				subblock = OUT_BUFFER_SIZE - out_pos;
+
+				if (rtdm_fd_is_user(fd)) {
+					if (rtdm_copy_from_user
+					    (fd,
+					     &ctx->out_buf[out_pos],
+					     in_pos, subblock) != 0) {
+						ret = -EFAULT;
+						break;
+					}
+				} else
+					memcpy(&ctx->out_buf[out_pos], in_pos,
+					       subblock);
+
+				written += subblock;
+				in_pos += subblock;
+
+				subblock = block - subblock;
+				out_pos = 0;
+			}
+
+			if (rtdm_fd_is_user(fd)) {
+				if (rtdm_copy_from_user
+				    (fd, &ctx->out_buf[out_pos],
+				     in_pos, subblock) != 0) {
+					ret = -EFAULT;
+					break;
+				}
+			} else
+				memcpy(&ctx->out_buf[out_pos], in_pos, subblock);
+
+			written += subblock;
+			in_pos += subblock;
+			nbyte -= block;
+
+			rtdm_lock_get_irqsave(&ctx->lock, lock_ctx);
+
+			ctx->out_tail =
+			    (ctx->out_tail + block) & (OUT_BUFFER_SIZE - 1);
+			ctx->out_npend += block;
+
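+			/*
+			 * Mark TX as interrupt-driven and unmask the
+			 * transmitter-empty interrupt; the IRQ handler drains
+			 * out_buf and signals out_event once it is empty.
+			 */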
+			ctx->ier_status |= IER_TX;
+			rt_imx_uart_start_tx(ctx);
+
+			rtdm_lock_put_irqrestore(&ctx->lock, lock_ctx);
+			continue;
+		}
+
+		rtdm_lock_put_irqrestore(&ctx->lock, lock_ctx);
+
+		ret = rtdm_event_timedwait(&ctx->out_event,
+					   ctx->config.tx_timeout,
+					   &timeout_seq);
+		if (ret < 0) {
+			if (ret == -EIDRM) {
+				/* Device has been closed -
+				 * return immediately.
+				 */
+				ret = -EBADF;
+			}
+			break;
+		}
+	}
+
+	rtdm_mutex_unlock(&ctx->out_lock);
+
+	if ((written > 0) && ((ret == 0) || (ret == -EAGAIN) ||
+			      (ret == -ETIMEDOUT)))
+		ret = written;
+
+	return ret;
+}
+
+static struct rtdm_driver imx_uart_driver = {
+	.profile_info		= RTDM_PROFILE_INFO(imx_uart,
+						    RTDM_CLASS_SERIAL,
+						    RTDM_SUBCLASS_16550A,
+						    RTSER_PROFILE_VER),
+	.device_count		= RT_IMX_UART_MAX,
+	.device_flags		= RTDM_NAMED_DEVICE | RTDM_EXCLUSIVE,
+	.context_size		= sizeof(struct rt_imx_uart_ctx),
+	.ops = {
+		.open		= rt_imx_uart_open,
+		.close		= rt_imx_uart_close,
+		.ioctl_rt	= rt_imx_uart_ioctl,
+		.ioctl_nrt	= rt_imx_uart_ioctl,
+		.read_rt	= rt_imx_uart_read,
+		.write_rt	= rt_imx_uart_write,
+	},
+};
+
+
+#ifdef CONFIG_OF
+
+/*
+ * This function returns 1 if pdev is not a device instantiated from the
+ * device tree, 0 if all information could be retrieved from it, or a
+ * negative errno otherwise.
+ */
+static int rt_imx_uart_probe_dt(struct rt_imx_uart_port *port,
+				struct platform_device *pdev)
+{
+	struct device_node *np = pdev->dev.of_node;
+	const struct of_device_id *of_id =
+			of_match_device(rt_imx_uart_dt_ids, &pdev->dev);
+	int ret;
+
+	if (!np)
+		/* no device tree device */
+		return 1;
+
+	ret = of_alias_get_id(np, "serial");
+	if (ret < 0) {
+		dev_err(&pdev->dev, "failed to get alias id, errno %d\n", ret);
+		return ret;
+	}
+
+	pdev->id = ret;
+
+	if (of_get_property(np, "uart-has-rtscts", NULL) ||
+	    of_get_property(np, "fsl,uart-has-rtscts", NULL) /* deprecated */)
+		port->have_rtscts = 1;
+	if (of_get_property(np, "fsl,irda-mode", NULL))
+		dev_warn(&pdev->dev, "IRDA not yet supported\n");
+
+	if (of_get_property(np, "fsl,dte-mode", NULL))
+		port->use_dcedte = 1;
+
+	port->devdata = of_id->data;
+
+	return 0;
+}
+#else
+static inline int rt_imx_uart_probe_dt(struct rt_imx_uart_port *port,
+				       struct platform_device *pdev)
+{
+	return 1;
+}
+#endif
+
+static void rt_imx_uart_probe_pdata(struct rt_imx_uart_port *port,
+				    struct platform_device *pdev)
+{
+	struct imxuart_platform_data *pdata = dev_get_platdata(&pdev->dev);
+
+	port->devdata = (struct imx_uart_data  *) pdev->id_entry->driver_data;
+
+	if (!pdata)
+		return;
+
+	if (pdata->flags & IMXUART_HAVE_RTSCTS)
+		port->have_rtscts = 1;
+}
+
+static int rt_imx_uart_probe(struct platform_device *pdev)
+{
+	struct rtdm_device *dev;
+	struct rt_imx_uart_port *port;
+	struct resource *res;
+	int ret;
+
+	port = devm_kzalloc(&pdev->dev, sizeof(*port), GFP_KERNEL);
+	if (!port)
+		return -ENOMEM;
+
+	ret = rt_imx_uart_probe_dt(port, pdev);
+	if (ret > 0)
+		rt_imx_uart_probe_pdata(port, pdev);
+	else if (ret < 0)
+		return ret;
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	if (!res)
+		return -ENODEV;
+
+	port->irq = platform_get_irq(pdev, 0);
+
+	if (port->irq <= 0)
+		return -ENODEV;
+
+	port->membase = devm_ioremap_resource(&pdev->dev, res);
+	if (IS_ERR(port->membase))
+		return PTR_ERR(port->membase);
+
+	dev = &port->rtdm_dev;
+	dev->driver = &imx_uart_driver;
+	dev->label = "rtser%d";
+	dev->device_data = port;
+
+	if (!tx_fifo[pdev->id] || tx_fifo[pdev->id] > TX_FIFO_SIZE)
+		port->tx_fifo = TX_FIFO_SIZE;
+	else
+		port->tx_fifo = tx_fifo[pdev->id];
+
+	port->clk_ipg = devm_clk_get(&pdev->dev, "ipg");
+	if (IS_ERR(port->clk_ipg))
+		return PTR_ERR(port->clk_ipg);
+
+	port->clk_per = devm_clk_get(&pdev->dev, "per");
+	if (IS_ERR(port->clk_per))
+		return PTR_ERR(port->clk_per);
+
+	clk_prepare_enable(port->clk_ipg);
+	clk_prepare_enable(port->clk_per);
+	port->uartclk = clk_get_rate(port->clk_per);
+
+	port->use_hwflow = 1;
+
+	ret = rtdm_dev_register(dev);
+	if (ret)
+		return ret;
+
+	platform_set_drvdata(pdev, port);
+
+	pr_info("%s on IMX UART%d: membase=0x%p irq=%d uartclk=%d\n",
+	       dev->name, pdev->id, port->membase, port->irq, port->uartclk);
+	return 0;
+}
+
+static int rt_imx_uart_remove(struct platform_device *pdev)
+{
+	struct imxuart_platform_data *pdata;
+	struct rt_imx_uart_port *port = platform_get_drvdata(pdev);
+	struct rtdm_device *dev = &port->rtdm_dev;
+
+	pdata = pdev->dev.platform_data;
+	platform_set_drvdata(pdev, NULL);
+
+	clk_disable_unprepare(port->clk_ipg);
+	clk_disable_unprepare(port->clk_per);
+	rtdm_dev_unregister(dev);
+
+	return 0;
+}
+
+static struct platform_driver rt_imx_uart_driver = {
+	.probe = rt_imx_uart_probe,
+	.remove	= rt_imx_uart_remove,
+	.id_table = rt_imx_uart_id_table,
+	.driver = {
+		.name = DRIVER_NAME,
+		.owner = THIS_MODULE,
+		.of_match_table = rt_imx_uart_dt_ids,
+	},
+	.prevent_deferred_probe = true,
+};
+
+
+static int __init rt_imx_uart_init(void)
+{
+	int ret;
+
+	if (!rtdm_available())
+		return -ENODEV;
+
+	ret = platform_driver_register(&rt_imx_uart_driver);
+	if (ret) {
+		pr_err("%s: could not register driver (err=%d)\n",
+			__func__, ret);
+	}
+
+	return ret;
+}
+
+static void __exit rt_imx_uart_exit(void)
+{
+	platform_driver_unregister(&rt_imx_uart_driver);
+}
+
+module_init(rt_imx_uart_init);
+module_exit(rt_imx_uart_exit);
+++ linux-patched/drivers/xenomai/serial/16550A_pnp.h	2022-03-21 12:58:31.241870902 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/drivers/xenomai/serial/Kconfig	1970-01-01 01:00:00.000000000 +0100
+/*
+ * Copyright (C) 2006-2007 Jan Kiszka <jan.kiszka@web.de>.
+ *
+ * Xenomai is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * Xenomai is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Xenomai; if not, write to the Free Software Foundation,
+ * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+#if defined(CONFIG_PNP) && \
+    (defined(CONFIG_XENO_DRIVERS_16550A_PIO) || \
+     defined(CONFIG_XENO_DRIVERS_16550A_ANY))
+
+#include <linux/pnp.h>
+
+#define UNKNOWN_DEV 0x3000
+
+/* Bluntly cloned from drivers/serial/8250_pnp.c */
+static const struct pnp_device_id rt_16550_pnp_tbl[] = {
+	/* Archtek America Corp. */
+	/* Archtek SmartLink Modem 3334BT Plug & Play */
+	{	"AAC000F",		0	},
+	/* Anchor Datacomm BV */
+	/* SXPro 144 External Data Fax Modem Plug & Play */
+	{	"ADC0001",		0	},
+	/* SXPro 288 External Data Fax Modem Plug & Play */
+	{	"ADC0002",		0	},
+	/* PROLiNK 1456VH ISA PnP K56flex Fax Modem */
+	{	"AEI0250",		0	},
+	/* Actiontec ISA PNP 56K X2 Fax Modem */
+	{	"AEI1240",		0	},
+	/* Rockwell 56K ACF II Fax+Data+Voice Modem */
+	{	"AKY1021",		0 /*SPCI_FL_NO_SHIRQ*/	},
+	/* AZT3005 PnP SOUND DEVICE */
+	{	"AZT4001",		0	},
+	/* Best Data Products Inc. Smart One 336F PnP Modem */
+	{	"BDP3336",		0	},
+	/*  Boca Research */
+	/* Boca Complete Ofc Communicator 14.4 Data-FAX */
+	{	"BRI0A49",		0	},
+	/* Boca Research 33,600 ACF Modem */
+	{	"BRI1400",		0	},
+	/* Boca 33.6 Kbps Internal FD34FSVD */
+	{	"BRI3400",		0	},
+	/* Boca 33.6 Kbps Internal FD34FSVD */
+	{	"BRI0A49",		0	},
+	/* Best Data Products Inc. Smart One 336F PnP Modem */
+	{	"BDP3336",		0	},
+	/* Computer Peripherals Inc */
+	/* EuroViVa CommCenter-33.6 SP PnP */
+	{	"CPI4050",		0	},
+	/* Creative Labs */
+	/* Creative Labs Phone Blaster 28.8 DSVD PnP Voice */
+	{	"CTL3001",		0	},
+	/* Creative Labs Modem Blaster 28.8 DSVD PnP Voice */
+	{	"CTL3011",		0	},
+	/* Creative */
+	/* Creative Modem Blaster Flash56 DI5601-1 */
+	{	"DMB1032",		0	},
+	/* Creative Modem Blaster V.90 DI5660 */
+	{	"DMB2001",		0	},
+	/* E-Tech */
+	/* E-Tech CyberBULLET PC56RVP */
+	{	"ETT0002",		0	},
+	/* FUJITSU */
+	/* Fujitsu 33600 PnP-I2 R Plug & Play */
+	{	"FUJ0202",		0	},
+	/* Fujitsu FMV-FX431 Plug & Play */
+	{	"FUJ0205",		0	},
+	/* Fujitsu 33600 PnP-I4 R Plug & Play */
+	{	"FUJ0206",		0	},
+	/* Fujitsu Fax Voice 33600 PNP-I5 R Plug & Play */
+	{	"FUJ0209",		0	},
+	/* Archtek America Corp. */
+	/* Archtek SmartLink Modem 3334BT Plug & Play */
+	{	"GVC000F",		0	},
+	/* Hayes */
+	/* Hayes Optima 288 V.34-V.FC + FAX + Voice Plug & Play */
+	{	"HAY0001",		0	},
+	/* Hayes Optima 336 V.34 + FAX + Voice PnP */
+	{	"HAY000C",		0	},
+	/* Hayes Optima 336B V.34 + FAX + Voice PnP */
+	{	"HAY000D",		0	},
+	/* Hayes Accura 56K Ext Fax Modem PnP */
+	{	"HAY5670",		0	},
+	/* Hayes Accura 56K Ext Fax Modem PnP */
+	{	"HAY5674",		0	},
+	/* Hayes Accura 56K Fax Modem PnP */
+	{	"HAY5675",		0	},
+	/* Hayes 288, V.34 + FAX */
+	{	"HAYF000",		0	},
+	/* Hayes Optima 288 V.34 + FAX + Voice, Plug & Play */
+	{	"HAYF001",		0	},
+	/* IBM */
+	/* IBM Thinkpad 701 Internal Modem Voice */
+	{	"IBM0033",		0	},
+	/* Intertex */
+	/* Intertex 28k8 33k6 Voice EXT PnP */
+	{	"IXDC801",		0	},
+	/* Intertex 33k6 56k Voice EXT PnP */
+	{	"IXDC901",		0	},
+	/* Intertex 28k8 33k6 Voice SP EXT PnP */
+	{	"IXDD801",		0	},
+	/* Intertex 33k6 56k Voice SP EXT PnP */
+	{	"IXDD901",		0	},
+	/* Intertex 28k8 33k6 Voice SP INT PnP */
+	{	"IXDF401",		0	},
+	/* Intertex 28k8 33k6 Voice SP EXT PnP */
+	{	"IXDF801",		0	},
+	/* Intertex 33k6 56k Voice SP EXT PnP */
+	{	"IXDF901",		0	},
+	/* Kortex International */
+	/* KORTEX 28800 Externe PnP */
+	{	"KOR4522",		0	},
+	/* KXPro 33.6 Vocal ASVD PnP */
+	{	"KORF661",		0	},
+	/* Lasat */
+	/* LASAT Internet 33600 PnP */
+	{	"LAS4040",		0	},
+	/* Lasat Safire 560 PnP */
+	{	"LAS4540",		0	},
+	/* Lasat Safire 336  PnP */
+	{	"LAS5440",		0	},
+	/* Microcom, Inc. */
+	/* Microcom TravelPorte FAST V.34 Plug & Play */
+	{	"MNP0281",		0	},
+	/* Microcom DeskPorte V.34 FAST or FAST+ Plug & Play */
+	{	"MNP0336",		0	},
+	/* Microcom DeskPorte FAST EP 28.8 Plug & Play */
+	{	"MNP0339",		0	},
+	/* Microcom DeskPorte 28.8P Plug & Play */
+	{	"MNP0342",		0	},
+	/* Microcom DeskPorte FAST ES 28.8 Plug & Play */
+	{	"MNP0500",		0	},
+	/* Microcom DeskPorte FAST ES 28.8 Plug & Play */
+	{	"MNP0501",		0	},
+	/* Microcom DeskPorte 28.8S Internal Plug & Play */
+	{	"MNP0502",		0	},
+	/* Motorola */
+	/* Motorola BitSURFR Plug & Play */
+	{	"MOT1105",		0	},
+	/* Motorola TA210 Plug & Play */
+	{	"MOT1111",		0	},
+	/* Motorola HMTA 200 (ISDN) Plug & Play */
+	{	"MOT1114",		0	},
+	/* Motorola BitSURFR Plug & Play */
+	{	"MOT1115",		0	},
+	/* Motorola Lifestyle 28.8 Internal */
+	{	"MOT1190",		0	},
+	/* Motorola V.3400 Plug & Play */
+	{	"MOT1501",		0	},
+	/* Motorola Lifestyle 28.8 V.34 Plug & Play */
+	{	"MOT1502",		0	},
+	/* Motorola Power 28.8 V.34 Plug & Play */
+	{	"MOT1505",		0	},
+	/* Motorola ModemSURFR External 28.8 Plug & Play */
+	{	"MOT1509",		0	},
+	/* Motorola Premier 33.6 Desktop Plug & Play */
+	{	"MOT150A",		0	},
+	/* Motorola VoiceSURFR 56K External PnP */
+	{	"MOT150F",		0	},
+	/* Motorola ModemSURFR 56K External PnP */
+	{	"MOT1510",		0	},
+	/* Motorola ModemSURFR 56K Internal PnP */
+	{	"MOT1550",		0	},
+	/* Motorola ModemSURFR Internal 28.8 Plug & Play */
+	{	"MOT1560",		0	},
+	/* Motorola Premier 33.6 Internal Plug & Play */
+	{	"MOT1580",		0	},
+	/* Motorola OnlineSURFR 28.8 Internal Plug & Play */
+	{	"MOT15B0",		0	},
+	/* Motorola VoiceSURFR 56K Internal PnP */
+	{	"MOT15F0",		0	},
+	/* Com 1 */
+	/*  Deskline K56 Phone System PnP */
+	{	"MVX00A1",		0	},
+	/* PC Rider K56 Phone System PnP */
+	{	"MVX00F2",		0	},
+	/* NEC 98NOTE SPEAKER PHONE FAX MODEM(33600bps) */
+	{	"nEC8241",		0	},
+	/* Pace 56 Voice Internal Plug & Play Modem */
+	{	"PMC2430",		0	},
+	/* Generic */
+	/* Generic standard PC COM port	 */
+	{	"PNP0500",		0	},
+	/* Generic 16550A-compatible COM port */
+	{	"PNP0501",		0	},
+	/* Compaq 14400 Modem */
+	{	"PNPC000",		0	},
+	/* Compaq 2400/9600 Modem */
+	{	"PNPC001",		0	},
+	/* Dial-Up Networking Serial Cable between 2 PCs */
+	{	"PNPC031",		0	},
+	/* Dial-Up Networking Parallel Cable between 2 PCs */
+	{	"PNPC032",		0	},
+	/* Standard 9600 bps Modem */
+	{	"PNPC100",		0	},
+	/* Standard 14400 bps Modem */
+	{	"PNPC101",		0	},
+	/*  Standard 28800 bps Modem*/
+	{	"PNPC102",		0	},
+	/*  Standard Modem*/
+	{	"PNPC103",		0	},
+	/*  Standard 9600 bps Modem*/
+	{	"PNPC104",		0	},
+	/*  Standard 14400 bps Modem*/
+	{	"PNPC105",		0	},
+	/*  Standard 28800 bps Modem*/
+	{	"PNPC106",		0	},
+	/*  Standard Modem */
+	{	"PNPC107",		0	},
+	/* Standard 9600 bps Modem */
+	{	"PNPC108",		0	},
+	/* Standard 14400 bps Modem */
+	{	"PNPC109",		0	},
+	/* Standard 28800 bps Modem */
+	{	"PNPC10A",		0	},
+	/* Standard Modem */
+	{	"PNPC10B",		0	},
+	/* Standard 9600 bps Modem */
+	{	"PNPC10C",		0	},
+	/* Standard 14400 bps Modem */
+	{	"PNPC10D",		0	},
+	/* Standard 28800 bps Modem */
+	{	"PNPC10E",		0	},
+	/* Standard Modem */
+	{	"PNPC10F",		0	},
+	/* Standard PCMCIA Card Modem */
+	{	"PNP2000",		0	},
+	/* Rockwell */
+	/* Modular Technology */
+	/* Rockwell 33.6 DPF Internal PnP */
+	/* Modular Technology 33.6 Internal PnP */
+	{	"ROK0030",		0	},
+	/* Kortex International */
+	/* KORTEX 14400 Externe PnP */
+	{	"ROK0100",		0	},
+	/* Rockwell 28.8 */
+	{	"ROK4120",		0	},
+	/* Viking Components, Inc */
+	/* Viking 28.8 INTERNAL Fax+Data+Voice PnP */
+	{	"ROK4920",		0	},
+	/* Rockwell */
+	/* British Telecom */
+	/* Modular Technology */
+	/* Rockwell 33.6 DPF External PnP */
+	/* BT Prologue 33.6 External PnP */
+	/* Modular Technology 33.6 External PnP */
+	{	"RSS00A0",		0	},
+	/* Viking 56K FAX INT */
+	{	"RSS0262",		0	},
+	/* K56 par,VV,Voice,Speakphone,AudioSpan,PnP */
+	{       "RSS0250",              0       },
+	/* SupraExpress 28.8 Data/Fax PnP modem */
+	{	"SUP1310",		0	},
+	/* SupraExpress 33.6 Data/Fax PnP modem */
+	{	"SUP1421",		0	},
+	/* SupraExpress 33.6 Data/Fax PnP modem */
+	{	"SUP1590",		0	},
+	/* SupraExpress 336i Sp ASVD */
+	{	"SUP1620",		0	},
+	/* SupraExpress 33.6 Data/Fax PnP modem */
+	{	"SUP1760",		0	},
+	/* SupraExpress 56i Sp Intl */
+	{	"SUP2171",		0	},
+	/* Phoebe Micro */
+	/* Phoebe Micro 33.6 Data Fax 1433VQH Plug & Play */
+	{	"TEX0011",		0	},
+	/* Archtek America Corp. */
+	/* Archtek SmartLink Modem 3334BT Plug & Play */
+	{	"UAC000F",		0	},
+	/* 3Com Corp. */
+	/* Gateway Telepath IIvi 33.6 */
+	{	"USR0000",		0	},
+	/* U.S. Robotics Sporster 33.6K Fax INT PnP */
+	{	"USR0002",		0	},
+	/*  Sportster Vi 14.4 PnP FAX Voicemail */
+	{	"USR0004",		0	},
+	/* U.S. Robotics 33.6K Voice INT PnP */
+	{	"USR0006",		0	},
+	/* U.S. Robotics 33.6K Voice EXT PnP */
+	{	"USR0007",		0	},
+	/* U.S. Robotics Courier V.Everything INT PnP */
+	{	"USR0009",		0	},
+	/* U.S. Robotics 33.6K Voice INT PnP */
+	{	"USR2002",		0	},
+	/* U.S. Robotics 56K Voice INT PnP */
+	{	"USR2070",		0	},
+	/* U.S. Robotics 56K Voice EXT PnP */
+	{	"USR2080",		0	},
+	/* U.S. Robotics 56K FAX INT */
+	{	"USR3031",		0	},
+	/* U.S. Robotics 56K FAX INT */
+	{	"USR3050",		0	},
+	/* U.S. Robotics 56K Voice INT PnP */
+	{	"USR3070",		0	},
+	/* U.S. Robotics 56K Voice EXT PnP */
+	{	"USR3080",		0	},
+	/* U.S. Robotics 56K Voice INT PnP */
+	{	"USR3090",		0	},
+	/* U.S. Robotics 56K Message  */
+	{	"USR9100",		0	},
+	/* U.S. Robotics 56K FAX EXT PnP*/
+	{	"USR9160",		0	},
+	/* U.S. Robotics 56K FAX INT PnP*/
+	{	"USR9170",		0	},
+	/* U.S. Robotics 56K Voice EXT PnP*/
+	{	"USR9180",		0	},
+	/* U.S. Robotics 56K Voice INT PnP*/
+	{	"USR9190",		0	},
+	/* Wacom tablets */
+	{	"WACF004",		0	},
+	{	"WACF005",		0	},
+	{       "WACF006",              0       },
+	/* Compaq touchscreen */
+	{       "FPI2002",              0 },
+	/* Fujitsu Stylistic touchscreens */
+	{       "FUJ02B2",              0 },
+	{       "FUJ02B3",              0 },
+	/* Fujitsu Stylistic LT touchscreens */
+	{       "FUJ02B4",              0 },
+	/* Passive Fujitsu Stylistic touchscreens */
+	{       "FUJ02B6",              0 },
+	{       "FUJ02B7",              0 },
+	{       "FUJ02B8",              0 },
+	{       "FUJ02B9",              0 },
+	{       "FUJ02BC",              0 },
+	/* Rockwell's (PORALiNK) 33600 INT PNP */
+	{	"WCI0003",		0	},
+	/* Unknown PnP modems */
+	{	"PNPCXXX",		UNKNOWN_DEV	},
+	/* More unknown PnP modems */
+	{	"PNPDXXX",		UNKNOWN_DEV	},
+	{	"",			0	}
+};
+
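+/*
+ * Match a PnP-discovered port against the "io" module parameter list and,
+ * if no IRQ was forced for that slot, adopt the one reported by the PnP
+ * layer.
+ */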
+static int rt_16550_pnp_probe(struct pnp_dev *dev,
+			       const struct pnp_device_id *dev_id)
+{
+	int i;
+
+	for (i = 0; i < MAX_DEVICES; i++)
+		if (pnp_port_valid(dev, 0) &&
+		    pnp_port_start(dev, 0) == io[i]) {
+			if (!irq[i])
+				irq[i] = pnp_irq(dev, 0);
+			return 0;
+		}
+
+	return -ENODEV;
+}
+
+static struct pnp_driver rt_16550_pnp_driver = {
+	.name		= RT_16550_DRIVER_NAME,
+	.id_table	= rt_16550_pnp_tbl,
+	.probe		= rt_16550_pnp_probe,
+};
+
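+/*
+ * Track whether the PnP driver was actually registered so that cleanup
+ * only unregisters it in that case.
+ */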
+static int pnp_registered;
+
+static inline void rt_16550_pnp_init(void)
+{
+	if (pnp_register_driver(&rt_16550_pnp_driver) == 0)
+		pnp_registered = 1;
+}
+
+static inline void rt_16550_pnp_cleanup(void)
+{
+	if (pnp_registered)
+		pnp_unregister_driver(&rt_16550_pnp_driver);
+}
+
+#else /* !CONFIG_PNP || !(..._16550A_IO || ..._16550A_ANY) */
+
+#define rt_16550_pnp_init()	do { } while (0)
+#define rt_16550_pnp_cleanup()	do { } while (0)
+
+#endif /* !CONFIG_PNP || !(..._16550A_IO || ..._16550A_ANY) */
+++ linux-patched/drivers/xenomai/serial/Kconfig	2022-03-21 12:58:31.234870970 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/drivers/xenomai/serial/mpc52xx_uart.c	1970-01-01 01:00:00.000000000 +0100
+menu "Serial drivers"
+
+config XENO_DRIVERS_16550A
+	tristate "16550A UART driver"
+	help
+	Real-time UART driver for 16550A compatible controllers. See
+	doc/txt/16550A-driver.txt for more details.
+
+choice
+	prompt "Hardware access mode"
+	depends on XENO_DRIVERS_16550A
+	default XENO_DRIVERS_16550A_PIO
+
+config XENO_DRIVERS_16550A_PIO
+	bool "Port-based I/O"
+	help
+	Hardware access only via I/O ports. Use module parameter
+	"io=<port>[,<port>[,...]]" to specify the base port of a device.
+
+config XENO_DRIVERS_16550A_MMIO
+	bool "Memory-mapped I/O"
+	help
+	Hardware access only via memory mapping. Use module parameter
+	"mem=<addr>[,<addr>[,...]]" to specify the physical base address of
+	a device.
+
+config XENO_DRIVERS_16550A_ANY
+	bool "Any access mode"
+	help
+	Decide at module load time (or via kernel parameter) which access
+	mode to use for which device. This mode is useful when devices of
+	both types can be present in a system, even at the same time.
+
+	Both "io" and "mem" module parameters are available, but only one
+	of them can be applied to a particular device. Use, e.g.,
+	"io=0x3f8,0 mem=0,0xe0000000" to address device 1 via I/O base port
+	0x3f8 and device 2 via physical base address 0xe0000000.
+
+endchoice
+
+config XENO_DRIVERS_16550A_PCI
+	depends on PCI && (XENO_DRIVERS_16550A_PIO || XENO_DRIVERS_16550A_ANY)
+	bool "PCI board support"
+	default n
+	help
+	This option activates support for PCI serial boards.
+
+config XENO_DRIVERS_16550A_PCI_MOXA
+	depends on XENO_DRIVERS_16550A_PCI
+	bool "Moxa PCI boards"
+	default n
+	help
+	This option activates support for the following Moxa boards:
+	PCI Serial Boards:
+	  C104H/PCI, C168H/PCI
+	  CP-114, CP-132
+	Universal PCI Serial Boards:
+	  CP-102U, CP-102UL, CP-104U
+	  CP-112UL, CP-114UL, CP-118U
+	  CP-132U, CP-134U, CP-138U
+	  CP-168U
+
+config XENO_DRIVERS_MPC52XX_UART
+	depends on PPC_MPC52xx
+	tristate "MPC52xx PSC UART driver"
+	help
+	Real-time UART driver for the PSC on the MPC5200 processor.
+
+config XENO_DRIVERS_IMX_UART
+	depends on ARCH_IMX || ARCH_MXC
+	tristate "RT IMX UART driver"
+	select RATIONAL
+	help
+	Real-time UART driver for the Freescale Semiconductor MXC Internal
+	UART compatible controllers.
+
+endmenu
+++ linux-patched/drivers/xenomai/serial/mpc52xx_uart.c	2022-03-21 12:58:31.226871048 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/drivers/xenomai/serial/16550A_io.h	1970-01-01 01:00:00.000000000 +0100
+/*
+ * Copyright (C) 2011 Wolfgang Grandegger <wg@denx.de>.
+ * Copyright (C) 2005-2007 Jan Kiszka <jan.kiszka@web.de>.
+ *
+ * Xenomai is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * Xenomai is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Xenomai; if not, write to the Free Software Foundation,
+ * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+#include <linux/version.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/slab.h>
+#include <linux/of_platform.h>
+#include <linux/io.h>
+
+#include <asm/mpc52xx.h>
+#include <asm/mpc52xx_psc.h>
+
+#include <rtdm/serial.h>
+#include <rtdm/driver.h>
+
+MODULE_DESCRIPTION("RTDM-based driver for MPC52xx UARTs");
+MODULE_AUTHOR("Wolfgang Grandegger <wg@denx.de>");
+MODULE_VERSION("1.0.0");
+MODULE_LICENSE("GPL");
+
+#define RT_MPC52XX_UART_DRVNAM	"xeno_mpc52xx_uart"
+
+#define IN_BUFFER_SIZE		512
+#define OUT_BUFFER_SIZE		512
+
+#define PARITY_MASK		0x03
+#define DATA_BITS_MASK		0x03
+#define STOP_BITS_MASK		0x01
+#define FIFO_MASK		0xC0
+#define EVENT_MASK		0x0F
+
+
+struct rt_mpc52xx_uart_port {
+	const struct device *dev;
+	struct mpc52xx_psc __iomem *psc;
+	struct mpc52xx_psc_fifo __iomem *fifo;
+	unsigned int uartclk;
+	int irq;
+	int num;
+};
+
+struct rt_mpc52xx_uart_ctx {
+	struct rtser_config config;	/* current device configuration */
+
+	rtdm_irq_t irq_handle;		/* device IRQ handle */
+	rtdm_lock_t lock;		/* lock to protect context struct */
+
+	int in_head;			/* RX ring buffer, head pointer */
+	int in_tail;			/* RX ring buffer, tail pointer */
+	size_t in_npend;		/* pending bytes in RX ring */
+	int in_nwait;			/* bytes the user waits for */
+	rtdm_event_t in_event;		/* raised to unblock reader */
+	char in_buf[IN_BUFFER_SIZE];	/* RX ring buffer */
+	volatile unsigned long in_lock;	/* single-reader lock */
+	uint64_t *in_history;		/* RX timestamp buffer */
+
+	int out_head;			/* TX ring buffer, head pointer */
+	int out_tail;			/* TX ring buffer, tail pointer */
+	size_t out_npend;		/* pending bytes in TX ring */
+	rtdm_event_t out_event;		/* raised to unblock writer */
+	char out_buf[OUT_BUFFER_SIZE];	/* TX ring buffer */
+	rtdm_mutex_t out_lock;		/* single-writer mutex */
+
+	uint64_t last_timestamp;	/* timestamp of last event */
+	int ioc_events;			/* recorded events */
+	rtdm_event_t ioc_event;		/* raised to unblock event waiter */
+	volatile unsigned long ioc_event_lock;	/* single-waiter lock */
+
+
+	int mcr_status;			/* emulated MCR cache */
+	int status;			/* cache for LSR + soft-states */
+	int saved_errors;		/* error cache for RTIOC_GET_STATUS */
+
+	unsigned int imr_status;	/* interrupt mask register cache */
+	int tx_empty;			/* shift register empty flag */
+
+	struct rt_mpc52xx_uart_port *port; /* Port related data */
+};
+
+static const struct rtser_config default_config = {
+	.config_mask = 0xFFFF,
+	.baud_rate = RTSER_DEF_BAUD,
+	.parity = RTSER_DEF_PARITY,
+	.data_bits = RTSER_DEF_BITS,
+	.stop_bits = RTSER_DEF_STOPB,
+	.handshake = RTSER_DEF_HAND,
+	.fifo_depth = RTSER_DEF_FIFO_DEPTH,
+	.rx_timeout = RTSER_DEF_TIMEOUT,
+	.tx_timeout = RTSER_DEF_TIMEOUT,
+	.event_timeout = RTSER_DEF_TIMEOUT,
+	.timestamp_history = RTSER_DEF_TIMESTAMP_HISTORY,
+	.event_mask = RTSER_DEF_EVENT_MASK,
+	.rs485 = RTSER_DEF_RS485,
+};
+
+/* lookup table for matching device nodes to index numbers */
+static struct device_node *rt_mpc52xx_uart_nodes[MPC52xx_PSC_MAXNUM];
+
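+/* Program the PSC FIFO controller: RX alarm level 0x1ff, TX alarm level 0x80. */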
+static inline void psc_fifo_init(struct rt_mpc52xx_uart_ctx *ctx)
+{
+	out_8(&ctx->port->fifo->rfcntl, 0x00);
+	out_be16(&ctx->port->fifo->rfalarm, 0x1ff);
+	out_8(&ctx->port->fifo->tfcntl, 0x07);
+	out_be16(&ctx->port->fifo->tfalarm, 0x80);
+}
+
+static inline int psc_raw_rx_rdy(struct rt_mpc52xx_uart_ctx *ctx)
+{
+	return in_be16(&ctx->port->psc->mpc52xx_psc_status) &
+		MPC52xx_PSC_SR_RXRDY;
+}
+
+static inline int psc_raw_tx_rdy(struct rt_mpc52xx_uart_ctx *ctx)
+{
+	return in_be16(&ctx->port->psc->mpc52xx_psc_status) &
+		MPC52xx_PSC_SR_TXRDY;
+}
+
+static inline int psc_rx_rdy(struct rt_mpc52xx_uart_ctx *ctx)
+{
+	return in_be16(&ctx->port->psc->mpc52xx_psc_isr) &
+		ctx->imr_status & MPC52xx_PSC_IMR_RXRDY;
+}
+
+static int psc_tx_rdy(struct rt_mpc52xx_uart_ctx *ctx)
+{
+	return in_be16(&ctx->port->psc->mpc52xx_psc_isr) &
+		ctx->imr_status & MPC52xx_PSC_IMR_TXRDY;
+}
+
+static inline int psc_tx_empty(struct rt_mpc52xx_uart_ctx *ctx)
+{
+	return in_be16(&ctx->port->psc->mpc52xx_psc_status) &
+		MPC52xx_PSC_SR_TXEMP;
+}
+
+static inline void psc_start_tx(struct rt_mpc52xx_uart_ctx *ctx)
+{
+	ctx->imr_status |= MPC52xx_PSC_IMR_TXRDY;
+	out_be16(&ctx->port->psc->mpc52xx_psc_imr, ctx->imr_status);
+}
+
+static inline void psc_stop_tx(struct rt_mpc52xx_uart_ctx *ctx)
+{
+	ctx->imr_status &= ~MPC52xx_PSC_IMR_TXRDY;
+	out_be16(&ctx->port->psc->mpc52xx_psc_imr, ctx->imr_status);
+}
+
+static inline void psc_stop_rx(struct rt_mpc52xx_uart_ctx *ctx)
+{
+	ctx->imr_status &= ~MPC52xx_PSC_IMR_RXRDY;
+	out_be16(&ctx->port->psc->mpc52xx_psc_imr, ctx->imr_status);
+}
+
+static inline void psc_write_char(struct rt_mpc52xx_uart_ctx *ctx,
+				  unsigned char c)
+{
+	out_8(&ctx->port->psc->mpc52xx_psc_buffer_8, c);
+}
+
+static inline unsigned char psc_read_char(struct rt_mpc52xx_uart_ctx *ctx)
+{
+	return in_8(&ctx->port->psc->mpc52xx_psc_buffer_8);
+}
+
+static inline void psc_disable_ints(struct rt_mpc52xx_uart_ctx *ctx)
+{
+	ctx->imr_status = 0;
+	out_be16(&ctx->port->psc->mpc52xx_psc_imr, ctx->imr_status);
+}
+
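+/*
+ * The PSC has no full modem-control register; only RTS can be driven,
+ * via the output port set/reset registers.
+ */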
+static void psc_set_mcr(struct rt_mpc52xx_uart_ctx *ctx,
+			unsigned int mcr)
+{
+	if (mcr & RTSER_MCR_RTS)
+		out_8(&ctx->port->psc->op1, MPC52xx_PSC_OP_RTS);
+	else
+		out_8(&ctx->port->psc->op0, MPC52xx_PSC_OP_RTS);
+}
+
+/* FIXME: status interrupts not yet handled properly */
+static unsigned int psc_get_msr(struct rt_mpc52xx_uart_ctx *ctx)
+{
+	unsigned int msr = RTSER_MSR_DSR;
+	u8 status = in_8(&ctx->port->psc->mpc52xx_psc_ipcr);
+
+	if (!(status & MPC52xx_PSC_CTS))
+		msr |= RTSER_MSR_CTS;
+	if (!(status & MPC52xx_PSC_DCD))
+		msr |= RTSER_MSR_DCD;
+
+	return msr;
+}
+
+static void psc_enable_ms(struct rt_mpc52xx_uart_ctx *ctx)
+{
+	struct mpc52xx_psc *psc = ctx->port->psc;
+
+	/* clear D_*-bits by reading them */
+	in_8(&psc->mpc52xx_psc_ipcr);
+	/* enable CTS and DCD as IPC interrupts */
+	out_8(&psc->mpc52xx_psc_acr, MPC52xx_PSC_IEC_CTS | MPC52xx_PSC_IEC_DCD);
+
+	ctx->imr_status |= MPC52xx_PSC_IMR_IPC;
+	out_be16(&psc->mpc52xx_psc_imr, ctx->imr_status);
+}
+
+static void psc_disable_ms(struct rt_mpc52xx_uart_ctx *ctx)
+{
+	struct mpc52xx_psc *psc = ctx->port->psc;
+
+	/* disable CTS and DCD as IPC interrupts */
+	out_8(&psc->mpc52xx_psc_acr, 0);
+
+	ctx->imr_status &= ~MPC52xx_PSC_IMR_IPC;
+	out_be16(&psc->mpc52xx_psc_imr, ctx->imr_status);
+}
+
+static struct of_device_id mpc5200_gpio_ids[] = {
+	{ .compatible = "fsl,mpc5200-gpio", },
+	{ .compatible = "mpc5200-gpio", },
+	{}
+};
+
+static void rt_mpc52xx_uart_init_hw(struct rt_mpc52xx_uart_port *port)
+{
+	struct mpc52xx_gpio __iomem *gpio;
+	struct device_node *gpio_np;
+	u32 port_config;
+
+	if (port->num == 6) {
+		gpio_np = of_find_matching_node(NULL, mpc5200_gpio_ids);
+		gpio = of_iomap(gpio_np, 0);
+		of_node_put(gpio_np);
+		if (!gpio) {
+			dev_err(port->dev, "PSC%d port_config: "
+				"couldn't map gpio ids\n", port->num);
+			return;
+		}
+		port_config = in_be32(&gpio->port_config);
+		port_config &= 0xFF0FFFFF; /* port config for PSC6 */
+		port_config |= 0x00500000;
+		dev_dbg(port->dev, "PSC%d port_config: old:%x new:%x\n",
+			port->num, in_be32(&gpio->port_config), port_config);
+		out_be32(&gpio->port_config, port_config);
+		iounmap(gpio);
+	}
+}
+
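+/*
+ * Store one received character (and its timestamp, if history is enabled)
+ * in the RX ring buffer, flagging a software overrun when the ring is full.
+ */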
+static inline void rt_mpc52xx_uart_put_char(struct rt_mpc52xx_uart_ctx *ctx,
+					    uint64_t *timestamp,
+					    unsigned char ch)
+{
+	ctx->in_buf[ctx->in_tail] = ch;
+	if (ctx->in_history)
+		ctx->in_history[ctx->in_tail] = *timestamp;
+	ctx->in_tail = (ctx->in_tail + 1) & (IN_BUFFER_SIZE - 1);
+
+	if (++ctx->in_npend > IN_BUFFER_SIZE) {
+		ctx->status |= RTSER_SOFT_OVERRUN_ERR;
+		ctx->in_npend--;
+	}
+}
+
+static inline int rt_mpc52xx_uart_rx_interrupt(struct rt_mpc52xx_uart_ctx *ctx,
+					       uint64_t *timestamp)
+{
+	int rbytes = 0;
+	int psc_status;
+
+	psc_status = in_be16(&ctx->port->psc->mpc52xx_psc_status);
+	while (psc_status & MPC52xx_PSC_SR_RXRDY) {
+		/* read input character */
+		rt_mpc52xx_uart_put_char(ctx, timestamp, psc_read_char(ctx));
+		rbytes++;
+
+		/* save new errors */
+		if (psc_status & (MPC52xx_PSC_SR_OE | MPC52xx_PSC_SR_PE |
+				  MPC52xx_PSC_SR_FE | MPC52xx_PSC_SR_RB)) {
+			if (psc_status & MPC52xx_PSC_SR_PE)
+				ctx->status |= RTSER_LSR_PARITY_ERR;
+			if (psc_status & MPC52xx_PSC_SR_FE)
+				ctx->status |= RTSER_LSR_FRAMING_ERR;
+			if (psc_status & MPC52xx_PSC_SR_RB)
+				ctx->status |= RTSER_LSR_BREAK_IND;
+
+			/*
+			 * Overrun is special, since it's reported
+			 * immediately, and doesn't affect the current
+			 * character.
+			 */
+			if (psc_status & MPC52xx_PSC_SR_OE) {
+				ctx->status |= RTSER_LSR_OVERRUN_ERR;
+				rt_mpc52xx_uart_put_char(ctx, timestamp, 0);
+				rbytes++;
+			}
+
+			/* Clear error condition */
+			out_8(&ctx->port->psc->command,
+			      MPC52xx_PSC_RST_ERR_STAT);
+		}
+
+		psc_status = in_be16(&ctx->port->psc->mpc52xx_psc_status);
+	}
+
+	return rbytes;
+}
+
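+/*
+ * Drain the TX ring buffer into the PSC as long as it accepts data. In
+ * RS485 mode, assert RTS before transmitting and arm the TX-empty
+ * interrupt so that RTS can be dropped once the shifter runs dry.
+ */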
+static inline int rt_mpc52xx_uart_tx_interrupt(struct rt_mpc52xx_uart_ctx *ctx)
+{
+	while (psc_raw_tx_rdy(ctx) && (ctx->out_npend > 0)) {
+		if (ctx->config.rs485 &&
+		    (ctx->mcr_status & RTSER_MCR_RTS) == 0) {
+			/* switch RTS */
+			ctx->mcr_status |= RTSER_MCR_RTS;
+			dev_dbg(ctx->port->dev, "Set RTS, mcr_status=%#x\n",
+				ctx->mcr_status);
+			psc_set_mcr(ctx, ctx->mcr_status);
+		}
+		if (ctx->config.rs485 ||
+		    ((ctx->config.event_mask & RTSER_EVENT_TXEMPTY) &&
+		     (ctx->imr_status & MPC52xx_PSC_IMR_TXEMP) == 0)) {
+			/* enable tx-empty interrupt */
+			ctx->imr_status |= MPC52xx_PSC_IMR_TXEMP;
+			dev_dbg(ctx->port->dev, "Enable TXEMP interrupt, "
+				"imr_status=%#x\n", ctx->imr_status);
+			out_be16(&ctx->port->psc->mpc52xx_psc_imr,
+				 ctx->imr_status);
+		}
+
+		psc_write_char(ctx, ctx->out_buf[ctx->out_head++]);
+		ctx->out_head &= OUT_BUFFER_SIZE - 1;
+		ctx->out_npend--;
+	}
+
+	return ctx->out_npend;
+}
+
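+/*
+ * Main interrupt handler: loop over RX, TX and modem-status conditions,
+ * record events for RTSER_RTIOC_WAIT_EVENT and wake up blocked readers
+ * and writers as appropriate.
+ */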
+static int rt_mpc52xx_uart_interrupt(rtdm_irq_t *irq_context)
+{
+	struct rt_mpc52xx_uart_ctx *ctx;
+	uint64_t timestamp = rtdm_clock_read();
+	int rbytes = 0;
+	int events = 0;
+	int ret = RTDM_IRQ_NONE;
+	int goon = 1;
+	int n;
+
+	ctx = rtdm_irq_get_arg(irq_context, struct rt_mpc52xx_uart_ctx);
+
+	rtdm_lock_get(&ctx->lock);
+
+	while (goon) {
+		goon = 0;
+		if (psc_rx_rdy(ctx)) {
+			dev_dbg(ctx->port->dev, "RX interrupt\n");
+			n = rt_mpc52xx_uart_rx_interrupt(ctx, &timestamp);
+			if (n) {
+				rbytes += n;
+				events |= RTSER_EVENT_RXPEND;
+			}
+		}
+		if (psc_tx_rdy(ctx))
+			goon |= rt_mpc52xx_uart_tx_interrupt(ctx);
+
+		if (psc_tx_empty(ctx)) {
+			if (ctx->config.rs485 &&
+			    (ctx->mcr_status & RTSER_MCR_RTS)) {
+				/* reset RTS */
+				ctx->mcr_status &= ~RTSER_MCR_RTS;
+				dev_dbg(ctx->port->dev, "Reset RTS, "
+					"mcr_status=%#x\n", ctx->mcr_status);
+				psc_set_mcr(ctx, ctx->mcr_status);
+			}
+			/* disable tx-empty interrupt */
+			ctx->imr_status &= ~MPC52xx_PSC_IMR_TXEMP;
+			dev_dbg(ctx->port->dev, "Disable TXEMP interrupt, "
+				"imr_status=%#x\n", ctx->imr_status);
+			out_be16(&ctx->port->psc->mpc52xx_psc_imr,
+				 ctx->imr_status);
+
+			events |= RTSER_EVENT_TXEMPTY;
+			ctx->tx_empty = 1;
+		}
+
+		if (ctx->config.event_mask &
+		    (RTSER_EVENT_MODEMHI | RTSER_EVENT_MODEMLO)) {
+			u8 status = in_8(&ctx->port->psc->mpc52xx_psc_ipcr);
+
+			if (status & MPC52xx_PSC_D_DCD)
+				events |= (status & MPC52xx_PSC_DCD) ?
+					RTSER_EVENT_MODEMLO :
+					RTSER_EVENT_MODEMHI;
+			if (status & MPC52xx_PSC_D_CTS)
+				events |= (status & MPC52xx_PSC_CTS) ?
+					RTSER_EVENT_MODEMLO :
+					RTSER_EVENT_MODEMHI;
+			dev_dbg(ctx->port->dev, "Modem line changed, "
+				"events=%#x\n", events);
+		}
+
+		ret = RTDM_IRQ_HANDLED;
+	}
+
+	if (ctx->in_nwait > 0) {
+		if ((ctx->in_nwait <= rbytes) || ctx->status) {
+			ctx->in_nwait = 0;
+			rtdm_event_signal(&ctx->in_event);
+		} else
+			ctx->in_nwait -= rbytes;
+	}
+
+	if (ctx->status)
+		events |= RTSER_EVENT_ERRPEND;
+
+	if (events & ctx->config.event_mask) {
+		int old_events = ctx->ioc_events;
+
+		ctx->last_timestamp = timestamp;
+		ctx->ioc_events = events;
+
+		if (!old_events)
+			rtdm_event_signal(&ctx->ioc_event);
+	}
+
+	if ((ctx->imr_status & MPC52xx_PSC_IMR_TXRDY) &&
+	    (ctx->out_npend == 0)) {
+		psc_stop_tx(ctx);
+		rtdm_event_signal(&ctx->out_event);
+	}
+
+	rtdm_lock_put(&ctx->lock);
+
+	return ret;
+}
+
+
+static int rt_mpc52xx_uart_set_config(struct rt_mpc52xx_uart_ctx *ctx,
+				      const struct rtser_config *config,
+				      uint64_t **in_history_ptr)
+{
+	rtdm_lockctx_t lock_ctx;
+	int err = 0;
+
+	/* make line configuration atomic and IRQ-safe */
+	rtdm_lock_get_irqsave(&ctx->lock, lock_ctx);
+
+	if (config->config_mask & RTSER_SET_BAUD)
+		ctx->config.baud_rate = config->baud_rate;
+	if (config->config_mask & RTSER_SET_PARITY)
+		ctx->config.parity = config->parity & PARITY_MASK;
+	if (config->config_mask & RTSER_SET_DATA_BITS)
+		ctx->config.data_bits = config->data_bits & DATA_BITS_MASK;
+	if (config->config_mask & RTSER_SET_STOP_BITS)
+		ctx->config.stop_bits = config->stop_bits & STOP_BITS_MASK;
+	if (config->config_mask & RTSER_SET_HANDSHAKE)
+		ctx->config.handshake = config->handshake;
+
+	if (config->config_mask & (RTSER_SET_PARITY |
+				   RTSER_SET_DATA_BITS | RTSER_SET_STOP_BITS |
+				   RTSER_SET_BAUD | RTSER_SET_HANDSHAKE)) {
+		struct mpc52xx_psc *psc = ctx->port->psc;
+		unsigned char mr1 = 0, mr2 = 0;
+		unsigned int divisor;
+		u16 prescaler;
+
+		switch (ctx->config.data_bits) {
+		case RTSER_5_BITS:
+			mr1 |= MPC52xx_PSC_MODE_5_BITS;
+			break;
+		case RTSER_6_BITS:
+			mr1 |= MPC52xx_PSC_MODE_6_BITS;
+			break;
+		case RTSER_7_BITS:
+			mr1 |= MPC52xx_PSC_MODE_7_BITS;
+			break;
+		case RTSER_8_BITS:
+		default:
+			mr1 |= MPC52xx_PSC_MODE_8_BITS;
+			break;
+		}
+
+		switch (ctx->config.parity) {
+		case RTSER_ODD_PARITY:
+			mr1 |= MPC52xx_PSC_MODE_PARODD;
+			break;
+		case RTSER_EVEN_PARITY:
+			mr1 |= MPC52xx_PSC_MODE_PAREVEN;
+			break;
+		case RTSER_NO_PARITY:
+		default:
+			mr1 |= MPC52xx_PSC_MODE_PARNONE;
+			break;
+		}
+
+		if (ctx->config.stop_bits == RTSER_2_STOPB)
+			mr2 |= (ctx->config.data_bits == RTSER_5_BITS) ?
+				MPC52xx_PSC_MODE_ONE_STOP_5_BITS :
+				MPC52xx_PSC_MODE_TWO_STOP;
+		else
+			mr2 |= MPC52xx_PSC_MODE_ONE_STOP;
+
+		if (ctx->config.handshake == RTSER_RTSCTS_HAND) {
+			mr1 |= MPC52xx_PSC_MODE_RXRTS;
+			mr2 |= MPC52xx_PSC_MODE_TXCTS;
+		} else if (config->config_mask & RTSER_SET_HANDSHAKE) {
+			ctx->mcr_status =
+				RTSER_MCR_DTR | RTSER_MCR_RTS | RTSER_MCR_OUT2;
+			psc_set_mcr(ctx, ctx->mcr_status);
+		}
+
+		/* Reset the TX & RX */
+		out_8(&psc->command, MPC52xx_PSC_RST_RX);
+		out_8(&psc->command, MPC52xx_PSC_RST_TX);
+
+		/* Send new mode settings */
+		out_8(&psc->command, MPC52xx_PSC_SEL_MODE_REG_1);
+		out_8(&psc->mode, mr1);
+		out_8(&psc->mode, mr2);
+
+		/* Set baudrate */
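+		/* divisor = round(uartclk / (32 * baud)); 0xdd00 selects the /32 prescaler */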
+		divisor = (ctx->port->uartclk + 16 * ctx->config.baud_rate) /
+			(32 * ctx->config.baud_rate);
+		prescaler = 0xdd00;
+		out_be16(&psc->mpc52xx_psc_clock_select, prescaler);
+		out_8(&psc->ctur, divisor >> 8);
+		out_8(&psc->ctlr, divisor & 0xff);
+
+		dev_info(ctx->port->dev,
+			 "mr1=%#x mr2=%#x baud=%d divisor=%d prescaler=%x\n",
+			 mr1, mr2, ctx->config.baud_rate, divisor, prescaler);
+
+		/* Reenable TX & RX */
+		out_8(&psc->command, MPC52xx_PSC_TX_ENABLE);
+		out_8(&psc->command, MPC52xx_PSC_RX_ENABLE);
+
+		/* Enable RX */
+		ctx->imr_status |= MPC52xx_PSC_IMR_RXRDY;
+		out_be16(&ctx->port->psc->mpc52xx_psc_imr, ctx->imr_status);
+
+		ctx->status = 0;
+		ctx->ioc_events &= ~RTSER_EVENT_ERRPEND;
+
+	}
+
+	if (config->config_mask & RTSER_SET_RS485) {
+		ctx->config.rs485 = config->rs485;
+		if (config->rs485) {
+			/* reset RTS */
+			ctx->mcr_status &= ~RTSER_MCR_RTS;
+			dev_dbg(ctx->port->dev, "Reset RTS, mcr_status=%#x\n",
+				ctx->mcr_status);
+			psc_set_mcr(ctx, ctx->mcr_status);
+		}
+	}
+
+	rtdm_lock_put_irqrestore(&ctx->lock, lock_ctx);
+
+	/* Timeout manipulation is not atomic. The user is supposed to take
+	   care not to use and change timeouts at the same time. */
+	if (config->config_mask & RTSER_SET_TIMEOUT_RX)
+		ctx->config.rx_timeout = config->rx_timeout;
+	if (config->config_mask & RTSER_SET_TIMEOUT_TX)
+		ctx->config.tx_timeout = config->tx_timeout;
+	if (config->config_mask & RTSER_SET_TIMEOUT_EVENT)
+		ctx->config.event_timeout = config->event_timeout;
+
+	if (config->config_mask & RTSER_SET_TIMESTAMP_HISTORY) {
+		/* change timestamp history atomically */
+		rtdm_lock_get_irqsave(&ctx->lock, lock_ctx);
+
+		if (config->timestamp_history & RTSER_RX_TIMESTAMP_HISTORY) {
+			if (!ctx->in_history) {
+				ctx->in_history = *in_history_ptr;
+				*in_history_ptr = NULL;
+				if (!ctx->in_history)
+					err = -ENOMEM;
+			}
+		} else {
+			*in_history_ptr = ctx->in_history;
+			ctx->in_history = NULL;
+		}
+
+		rtdm_lock_put_irqrestore(&ctx->lock, lock_ctx);
+	}
+
+	if (config->config_mask & RTSER_SET_EVENT_MASK) {
+		/* change event mask atomically */
+		rtdm_lock_get_irqsave(&ctx->lock, lock_ctx);
+
+		ctx->config.event_mask = config->event_mask & EVENT_MASK;
+		ctx->ioc_events = 0;
+
+		if ((config->event_mask & RTSER_EVENT_RXPEND) &&
+		    (ctx->in_npend > 0))
+			ctx->ioc_events |= RTSER_EVENT_RXPEND;
+
+		if ((config->event_mask & RTSER_EVENT_ERRPEND) &&
+		    ctx->status)
+			ctx->ioc_events |= RTSER_EVENT_ERRPEND;
+
+		if ((config->event_mask & RTSER_EVENT_TXEMPTY) &&
+		    !ctx->out_npend && ctx->tx_empty)
+			ctx->ioc_events |= RTSER_EVENT_TXEMPTY;
+
+		if (config->event_mask &
+		    (RTSER_EVENT_MODEMHI | RTSER_EVENT_MODEMLO))
+			psc_enable_ms(ctx);
+		else
+			psc_disable_ms(ctx);
+
+		rtdm_lock_put_irqrestore(&ctx->lock, lock_ctx);
+	}
+
+	return err;
+}
+
+void rt_mpc52xx_uart_cleanup_ctx(struct rt_mpc52xx_uart_ctx *ctx)
+{
+	rtdm_event_destroy(&ctx->in_event);
+	rtdm_event_destroy(&ctx->out_event);
+	rtdm_event_destroy(&ctx->ioc_event);
+	rtdm_mutex_destroy(&ctx->out_lock);
+}
+
+static int rt_mpc52xx_uart_open(struct rtdm_fd *fd, int oflags)
+{
+	struct rt_mpc52xx_uart_ctx *ctx;
+	rtdm_lockctx_t lock_ctx;
+	uint64_t *dummy;
+	int err;
+
+	ctx = rtdm_fd_to_private(fd);
+	ctx->port = (struct rt_mpc52xx_uart_port *)rtdm_fd_device(fd)->device_data;
+
+	/* IPC initialisation - cannot fail with used parameters */
+	rtdm_lock_init(&ctx->lock);
+	rtdm_event_init(&ctx->in_event, 0);
+	rtdm_event_init(&ctx->out_event, 0);
+	rtdm_event_init(&ctx->ioc_event, 0);
+	rtdm_mutex_init(&ctx->out_lock);
+
+	ctx->in_head = 0;
+	ctx->in_tail = 0;
+	ctx->in_npend = 0;
+	ctx->in_nwait = 0;
+	ctx->in_lock = 0;
+	ctx->in_history = NULL;
+
+	ctx->out_head = 0;
+	ctx->out_tail = 0;
+	ctx->out_npend = 0;
+
+	ctx->ioc_events = 0;
+	ctx->ioc_event_lock = 0;
+	ctx->status = 0;
+	ctx->saved_errors = 0;
+
+	rtdm_lock_get_irqsave(&ctx->lock, lock_ctx);
+
+	psc_disable_ints(ctx);
+
+	/* Reset/activate the port, clear and enable interrupts */
+	out_8(&ctx->port->psc->command, MPC52xx_PSC_RST_RX);
+	out_8(&ctx->port->psc->command, MPC52xx_PSC_RST_TX);
+
+	out_be32(&ctx->port->psc->sicr, 0);	/* UART mode DCD ignored */
+
+	psc_fifo_init(ctx);
+
+	out_8(&ctx->port->psc->command, MPC52xx_PSC_TX_ENABLE);
+	out_8(&ctx->port->psc->command, MPC52xx_PSC_RX_ENABLE);
+
+	rtdm_lock_put_irqrestore(&ctx->lock, lock_ctx);
+
+	rt_mpc52xx_uart_set_config(ctx, &default_config, &dummy);
+
+	err = rtdm_irq_request(&ctx->irq_handle, ctx->port->irq,
+			       rt_mpc52xx_uart_interrupt, 0,
+			       rtdm_fd_device(fd)->name, ctx);
+	if (err) {
+		psc_set_mcr(ctx, 0);
+		rt_mpc52xx_uart_cleanup_ctx(ctx);
+
+		return err;
+	}
+
+	return 0;
+}
+
+static void rt_mpc52xx_uart_close(struct rtdm_fd *fd)
+{
+	struct rt_mpc52xx_uart_ctx *ctx;
+	uint64_t *in_history;
+	rtdm_lockctx_t lock_ctx;
+
+	ctx = rtdm_fd_to_private(fd);
+
+	rtdm_lock_get_irqsave(&ctx->lock, lock_ctx);
+
+	/* reset DTR and RTS */
+	psc_set_mcr(ctx, 0);
+
+	psc_disable_ints(ctx);
+
+	in_history = ctx->in_history;
+	ctx->in_history = NULL;
+
+	rtdm_lock_put_irqrestore(&ctx->lock, lock_ctx);
+
+	rtdm_irq_free(&ctx->irq_handle);
+
+	rt_mpc52xx_uart_cleanup_ctx(ctx);
+
+	kfree(in_history);
+}
+
+static int rt_mpc52xx_uart_ioctl(struct rtdm_fd *fd,
+				 unsigned int request, void *arg)
+{
+	rtdm_lockctx_t lock_ctx;
+	struct rt_mpc52xx_uart_ctx *ctx;
+	int err = 0;
+
+	ctx = rtdm_fd_to_private(fd);
+
+	switch (request) {
+	case RTSER_RTIOC_GET_CONFIG:
+		if (rtdm_fd_is_user(fd))
+			err = rtdm_safe_copy_to_user(fd, arg,
+						     &ctx->config,
+						     sizeof(struct
+							    rtser_config));
+		else
+			memcpy(arg, &ctx->config, sizeof(struct rtser_config));
+		break;
+
+	case RTSER_RTIOC_SET_CONFIG: {
+		struct rtser_config *config;
+		struct rtser_config config_buf;
+		uint64_t *hist_buf = NULL;
+
+		config = (struct rtser_config *)arg;
+
+		if (rtdm_fd_is_user(fd)) {
+			err = rtdm_safe_copy_from_user(fd, &config_buf,
+						       arg,
+						       sizeof(struct
+							      rtser_config));
+			if (err)
+				return err;
+
+			config = &config_buf;
+		}
+
+		if ((config->config_mask & RTSER_SET_BAUD) &&
+		    (config->baud_rate <= 0))
+			/* invalid baudrate for this port */
+			return -EINVAL;
+
+		if (config->config_mask & RTSER_SET_TIMESTAMP_HISTORY) {
+			/*
+			 * Reflect the call to non-RT as we will likely
+			 * allocate or free the buffer.
+			 */
+			if (rtdm_in_rt_context())
+				return -ENOSYS;
+
+			if (config->timestamp_history & RTSER_RX_TIMESTAMP_HISTORY)
+				hist_buf = kmalloc(IN_BUFFER_SIZE *
+						   sizeof(nanosecs_abs_t),
+						   GFP_KERNEL);
+		}
+
+		rt_mpc52xx_uart_set_config(ctx, config, &hist_buf);
+
+		if (hist_buf)
+			kfree(hist_buf);
+
+		break;
+	}
+
+	case RTSER_RTIOC_GET_STATUS: {
+		int status;
+
+		rtdm_lock_get_irqsave(&ctx->lock, lock_ctx);
+
+		status = ctx->saved_errors | ctx->status;
+		ctx->status = 0;
+		ctx->saved_errors = 0;
+		ctx->ioc_events &= ~RTSER_EVENT_ERRPEND;
+
+		rtdm_lock_put_irqrestore(&ctx->lock, lock_ctx);
+
+		if (rtdm_fd_is_user(fd)) {
+			struct rtser_status status_buf;
+
+			status_buf.line_status = status;
+			status_buf.modem_status = psc_get_msr(ctx);
+
+			err = rtdm_safe_copy_to_user(fd, arg,
+						     &status_buf,
+						     sizeof(struct
+							    rtser_status));
+		} else {
+			((struct rtser_status *)arg)->line_status = status;
+			((struct rtser_status *)arg)->modem_status =
+				psc_get_msr(ctx);
+		}
+		break;
+	}
+
+	case RTSER_RTIOC_GET_CONTROL:
+		if (rtdm_fd_is_user(fd))
+			err = rtdm_safe_copy_to_user(fd, arg,
+						     &ctx->mcr_status,
+						     sizeof(int));
+		else
+			*(int *)arg = ctx->mcr_status;
+
+		break;
+
+	case RTSER_RTIOC_SET_CONTROL: {
+		int new_mcr = (long)arg;
+
+		if ((new_mcr & RTSER_MCR_RTS) != RTSER_MCR_RTS)
+			dev_warn(ctx->port->dev,
+				 "MCR: Only RTS is supported\n");
+		rtdm_lock_get_irqsave(&ctx->lock, lock_ctx);
+		ctx->mcr_status = new_mcr & RTSER_MCR_RTS;
+		psc_set_mcr(ctx, ctx->mcr_status);
+		rtdm_lock_put_irqrestore(&ctx->lock, lock_ctx);
+		break;
+	}
+
+	case RTSER_RTIOC_WAIT_EVENT: {
+		struct rtser_event ev = { .rxpend_timestamp = 0 };
+		rtdm_toseq_t timeout_seq;
+
+		if (!rtdm_in_rt_context())
+			return -ENOSYS;
+
+		/* Only one waiter allowed, stop any further attempts here. */
+		if (test_and_set_bit(0, &ctx->ioc_event_lock))
+			return -EBUSY;
+
+		rtdm_toseq_init(&timeout_seq, ctx->config.event_timeout);
+
+		rtdm_lock_get_irqsave(&ctx->lock, lock_ctx);
+
+		while (!ctx->ioc_events) {
+			rtdm_lock_put_irqrestore(&ctx->lock, lock_ctx);
+
+			err = rtdm_event_timedwait(&ctx->ioc_event,
+						   ctx->config.event_timeout,
+						   &timeout_seq);
+			if (err) {
+				/* Device has been closed? */
+				if (err == -EIDRM)
+					err = -EBADF;
+				goto wait_unlock_out;
+			}
+
+			rtdm_lock_get_irqsave(&ctx->lock, lock_ctx);
+		}
+
+		ev.events = ctx->ioc_events;
+		ctx->ioc_events &= ~(RTSER_EVENT_MODEMHI | RTSER_EVENT_MODEMLO);
+
+		ev.last_timestamp = ctx->last_timestamp;
+		ev.rx_pending = ctx->in_npend;
+
+		if (ctx->in_history)
+			ev.rxpend_timestamp = ctx->in_history[ctx->in_head];
+
+		rtdm_lock_put_irqrestore(&ctx->lock, lock_ctx);
+
+		if (rtdm_fd_is_user(fd))
+			err =
+			    rtdm_safe_copy_to_user(fd, arg, &ev,
+						   sizeof(struct
+							  rtser_event));
+		else
+			memcpy(arg, &ev, sizeof(struct rtser_event));
+
+	      wait_unlock_out:
+		/* release the simple event waiter lock */
+		clear_bit(0, &ctx->ioc_event_lock);
+		break;
+	}
+
+	case RTSER_RTIOC_BREAK_CTL: {
+		rtdm_lock_get_irqsave(&ctx->lock, lock_ctx);
+		if ((long)arg & RTSER_BREAK_SET)
+			out_8(&ctx->port->psc->command,
+			      MPC52xx_PSC_START_BRK);
+		else
+			out_8(&ctx->port->psc->command,
+			      MPC52xx_PSC_STOP_BRK);
+		rtdm_lock_put_irqrestore(&ctx->lock, lock_ctx);
+		break;
+	}
+
+#ifdef ISREADY
+	case RTIOC_PURGE: {
+		int fcr = 0;
+
+		rtdm_lock_get_irqsave(&ctx->lock, lock_ctx);
+		if ((long)arg & RTDM_PURGE_RX_BUFFER) {
+			ctx->in_head = 0;
+			ctx->in_tail = 0;
+			ctx->in_npend = 0;
+			ctx->status = 0;
+			fcr |= FCR_FIFO | FCR_RESET_RX;
+			rt_mpc52xx_uart_reg_in(mode, base, RHR);
+		}
+		if ((long)arg & RTDM_PURGE_TX_BUFFER) {
+			ctx->out_head = 0;
+			ctx->out_tail = 0;
+			ctx->out_npend = 0;
+			fcr |= FCR_FIFO | FCR_RESET_TX;
+		}
+		if (fcr) {
+			rt_mpc52xx_uart_reg_out(mode, base, FCR, fcr);
+			rt_mpc52xx_uart_reg_out(mode, base, FCR,
+					 FCR_FIFO | ctx->config.fifo_depth);
+		}
+		rtdm_lock_put_irqrestore(&ctx->lock, lock_ctx);
+		break;
+	}
+#endif
+
+	default:
+		err = -ENOTTY;
+	}
+
+	return err;
+}
+
+static ssize_t rt_mpc52xx_uart_read(struct rtdm_fd *fd, void *buf,
+				    size_t nbyte)
+{
+	struct rt_mpc52xx_uart_ctx *ctx;
+	rtdm_lockctx_t lock_ctx;
+	size_t read = 0;
+	int pending;
+	int block;
+	int subblock;
+	int in_pos;
+	char *out_pos = (char *)buf;
+	rtdm_toseq_t timeout_seq;
+	ssize_t ret = -EAGAIN;	/* for non-blocking read */
+	int nonblocking;
+
+	if (nbyte == 0)
+		return 0;
+
+	if (rtdm_fd_is_user(fd) && !rtdm_rw_user_ok(fd, buf, nbyte))
+		return -EFAULT;
+
+	ctx = rtdm_fd_to_private(fd);
+
+	rtdm_toseq_init(&timeout_seq, ctx->config.rx_timeout);
+
+	/* non-blocking is handled separately here */
+	nonblocking = (ctx->config.rx_timeout < 0);
+
+	/* only one reader allowed, stop any further attempts here */
+	if (test_and_set_bit(0, &ctx->in_lock))
+		return -EBUSY;
+
+	rtdm_lock_get_irqsave(&ctx->lock, lock_ctx);
+
+	while (1) {
+		if (ctx->status) {
+			if (ctx->status & RTSER_LSR_BREAK_IND)
+				ret = -EPIPE;
+			else
+				ret = -EIO;
+			ctx->saved_errors = ctx->status &
+			    (RTSER_LSR_OVERRUN_ERR | RTSER_LSR_PARITY_ERR |
+			     RTSER_LSR_FRAMING_ERR | RTSER_SOFT_OVERRUN_ERR);
+			ctx->status = 0;
+			break;
+		}
+
+		pending = ctx->in_npend;
+
+		if (pending > 0) {
+			block = subblock = (pending <= nbyte) ? pending : nbyte;
+			in_pos = ctx->in_head;
+
+			rtdm_lock_put_irqrestore(&ctx->lock, lock_ctx);
+
+			/* Do we have to wrap around the buffer end? */
+			if (in_pos + subblock > IN_BUFFER_SIZE) {
+				/* Treat the block between head and buffer end
+				   separately. */
+				subblock = IN_BUFFER_SIZE - in_pos;
+
+				if (rtdm_fd_is_user(fd)) {
+					if (rtdm_copy_to_user
+					    (fd, out_pos,
+					     &ctx->in_buf[in_pos],
+					     subblock) != 0) {
+						ret = -EFAULT;
+						goto break_unlocked;
+					}
+				} else
+					memcpy(out_pos, &ctx->in_buf[in_pos],
+					       subblock);
+
+				read += subblock;
+				out_pos += subblock;
+
+				subblock = block - subblock;
+				in_pos = 0;
+			}
+
+			if (rtdm_fd_is_user(fd)) {
+				if (rtdm_copy_to_user(fd, out_pos,
+						      &ctx->in_buf[in_pos],
+						      subblock) != 0) {
+					ret = -EFAULT;
+					goto break_unlocked;
+				}
+			} else
+				memcpy(out_pos, &ctx->in_buf[in_pos], subblock);
+
+			read += subblock;
+			out_pos += subblock;
+			nbyte -= block;
+
+			rtdm_lock_get_irqsave(&ctx->lock, lock_ctx);
+
+			ctx->in_head =
+			    (ctx->in_head + block) & (IN_BUFFER_SIZE - 1);
+			if ((ctx->in_npend -= block) == 0)
+				ctx->ioc_events &= ~RTSER_EVENT_RXPEND;
+
+			if (nbyte == 0)
+				break; /* All requested bytes read. */
+
+			continue;
+		}
+
+		if (nonblocking)
+			/* ret was set to EAGAIN in case of a real
+			   non-blocking call or contains the error
+			   returned by rtdm_event_wait[_until] */
+			break;
+
+		ctx->in_nwait = nbyte;
+
+		rtdm_lock_put_irqrestore(&ctx->lock, lock_ctx);
+
+		ret = rtdm_event_timedwait(&ctx->in_event,
+					   ctx->config.rx_timeout,
+					   &timeout_seq);
+		if (ret < 0) {
+			if (ret == -EIDRM) {
+				/* Device has been closed -
+				   return immediately. */
+				return -EBADF;
+			}
+
+			rtdm_lock_get_irqsave(&ctx->lock, lock_ctx);
+
+			nonblocking = 1;
+			if (ctx->in_npend > 0) {
+				/* Final turn: collect pending bytes
+				   before exit. */
+				continue;
+			}
+
+			ctx->in_nwait = 0;
+			break;
+		}
+
+		rtdm_lock_get_irqsave(&ctx->lock, lock_ctx);
+	}
+
+	rtdm_lock_put_irqrestore(&ctx->lock, lock_ctx);
+
+break_unlocked:
+	/* Release the simple reader lock. */
+	clear_bit(0, &ctx->in_lock);
+
+	if ((read > 0) && ((ret == 0) || (ret == -EAGAIN) ||
+			   (ret == -ETIMEDOUT) || (ret == -EINTR)))
+		ret = read;
+
+	return ret;
+}
+
+static ssize_t rt_mpc52xx_uart_write(struct rtdm_fd *fd,
+				     const void *buf,
+				     size_t nbyte)
+{
+	struct rt_mpc52xx_uart_ctx *ctx;
+	rtdm_lockctx_t lock_ctx;
+	size_t written = 0;
+	int free;
+	int block;
+	int subblock;
+	int out_pos;
+	char *in_pos = (char *)buf;
+	rtdm_toseq_t timeout_seq;
+	ssize_t ret;
+
+	if (nbyte == 0)
+		return 0;
+
+	if (rtdm_fd_is_user(fd) && !rtdm_read_user_ok(fd, buf, nbyte))
+		return -EFAULT;
+
+	ctx = rtdm_fd_to_private(fd);
+
+	rtdm_toseq_init(&timeout_seq, ctx->config.rx_timeout);
+
+	/* Make write operation atomic. */
+	ret = rtdm_mutex_timedlock(&ctx->out_lock, ctx->config.rx_timeout,
+				   &timeout_seq);
+	if (ret)
+		return ret;
+
+	while (nbyte > 0) {
+		rtdm_lock_get_irqsave(&ctx->lock, lock_ctx);
+
+		free = OUT_BUFFER_SIZE - ctx->out_npend;
+
+		if (free > 0) {
+			block = subblock = (nbyte <= free) ? nbyte : free;
+			out_pos = ctx->out_tail;
+
+			rtdm_lock_put_irqrestore(&ctx->lock, lock_ctx);
+
+			/* Do we have to wrap around the buffer end? */
+			if (out_pos + subblock > OUT_BUFFER_SIZE) {
+				/* Treat the block between head and buffer
+				   end separately. */
+				subblock = OUT_BUFFER_SIZE - out_pos;
+
+				if (rtdm_fd_is_user(fd)) {
+					if (rtdm_copy_from_user
+					    (fd,
+					     &ctx->out_buf[out_pos],
+					     in_pos, subblock) != 0) {
+						ret = -EFAULT;
+						break;
+					}
+				} else
+					memcpy(&ctx->out_buf[out_pos], in_pos,
+					       subblock);
+
+				written += subblock;
+				in_pos += subblock;
+
+				subblock = block - subblock;
+				out_pos = 0;
+			}
+
+			if (rtdm_fd_is_user(fd)) {
+				if (rtdm_copy_from_user
+				    (fd, &ctx->out_buf[out_pos],
+				     in_pos, subblock) != 0) {
+					ret = -EFAULT;
+					break;
+				}
+			} else
+				memcpy(&ctx->out_buf[out_pos], in_pos, subblock);
+
+			written += subblock;
+			in_pos += subblock;
+			nbyte -= block;
+
+			rtdm_lock_get_irqsave(&ctx->lock, lock_ctx);
+
+			ctx->out_tail =
+			    (ctx->out_tail + block) & (OUT_BUFFER_SIZE - 1);
+			ctx->out_npend += block;
+
+			/* Mark shift register not empty */
+			ctx->ioc_events &= ~RTSER_EVENT_TXEMPTY;
+			ctx->tx_empty = 0;
+
+			psc_start_tx(ctx);
+
+			rtdm_lock_put_irqrestore(&ctx->lock, lock_ctx);
+			continue;
+		}
+
+		rtdm_lock_put_irqrestore(&ctx->lock, lock_ctx);
+
+		ret = rtdm_event_timedwait(&ctx->out_event,
+					   ctx->config.tx_timeout,
+					   &timeout_seq);
+		if (ret < 0) {
+			if (ret == -EIDRM) {
+				/* Device has been closed -
+				   return immediately. */
+				return -EBADF;
+			}
+			if (ret == -EWOULDBLOCK) {
+				/* Fix error code for non-blocking mode. */
+				ret = -EAGAIN;
+			}
+			break;
+		}
+	}
+
+	rtdm_mutex_unlock(&ctx->out_lock);
+
+	if ((written > 0) && ((ret == 0) || (ret == -EAGAIN) ||
+			      (ret == -ETIMEDOUT) || (ret == -EINTR)))
+		ret = written;
+
+	return ret;
+}
+
+static struct rtdm_driver mpc52xx_uart_driver = {
+	.profile_info		= RTDM_PROFILE_INFO(imx_uart,
+						    RTDM_CLASS_SERIAL,
+						    RTDM_SUBCLASS_16550A,
+						    RTSER_PROFILE_VER),
+	.device_count		= MPC52xx_PSC_MAXNUM,
+	.device_flags		= RTDM_NAMED_DEVICE | RTDM_EXCLUSIVE,
+	.context_size		= sizeof(struct rt_mpc52xx_uart_ctx),
+	.ops = {
+		.open		= rt_mpc52xx_uart_open,
+		.close		= rt_mpc52xx_uart_close,
+		.ioctl_rt	= rt_mpc52xx_uart_ioctl,
+		.ioctl_nrt	= rt_mpc52xx_uart_ioctl,
+		.read_rt	= rt_mpc52xx_uart_read,
+		.write_rt	= rt_mpc52xx_uart_write,
+	},
+};
+
+static int rt_mpc52xx_uart_of_probe(struct platform_device *op)
+{
+	struct rt_mpc52xx_uart_port *port;
+	struct rtdm_device *dev;
+	struct resource res;
+	int ret, idx;
+
+	dev_dbg(&op->dev, "mpc52xx_uart_probe(op=%p)\n", op);
+
+	/* Check validity & presence */
+	for (idx = 0; idx < MPC52xx_PSC_MAXNUM; idx++)
+		if (rt_mpc52xx_uart_nodes[idx] == op->dev.of_node)
+			break;
+	if (idx >= MPC52xx_PSC_MAXNUM)
+		return -EINVAL;
+
+	port = kmalloc(sizeof(*port), GFP_KERNEL);
+	if (!port) {
+		dev_err(&op->dev, "Could not allocate port space\n");
+		return -ENOMEM;
+	}
+	port->dev = &op->dev;
+
+	/*
+	 * Set the UART clock to the input clock of the PSC; the different
+	 * prescalers are taken into account in the set_baudrate() methods
+	 * of the respective chip.
+	 */
+	port->uartclk = mpc5xxx_get_bus_frequency(op->dev.of_node);
+	if (port->uartclk == 0) {
+		dev_err(&op->dev, "Could not find uart clock frequency\n");
+		ret = -EINVAL;
+		goto out_kfree_port;
+	}
+
+	/* Fetch register locations */
+	ret = of_address_to_resource(op->dev.of_node, 0, &res);
+	if (ret) {
+		dev_err(&op->dev, "Could not get resources\n");
+		goto out_kfree_port;
+	}
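+	/*
+	 * Derive the PSC index from the register block offset: PSC1..PSC5
+	 * live at 0x2000..0x2800 in 0x200 steps, PSC6 at 0x2C00.
+	 */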
+	port->num = ((res.start >> 8) & 0xf) / 2;
+	if (port->num < 6)
+		port->num++;
+
+	if (!request_mem_region(res.start, resource_size(&res),
+				RT_MPC52XX_UART_DRVNAM)) {
+		ret = -EBUSY;
+		goto out_kfree_port;
+	}
+
+	port->psc = ioremap(res.start, resource_size(&res));
+	if (!port->psc) {
+		dev_err(&op->dev, "Could not map PSC registers\n");
+		ret = -ENOMEM;
+		goto out_release_mem_region;
+	}
+	port->fifo = (struct mpc52xx_psc_fifo __iomem *)(port->psc + 1);
+
+	port->irq = irq_of_parse_and_map(op->dev.of_node, 0);
+	if (port->irq <= 0) {
+		dev_err(&op->dev, "Could not get irq\n");
+		ret = -ENODEV;
+		goto out_iounmap;
+	}
+
+	dev = kmalloc(sizeof(struct rtdm_device), GFP_KERNEL);
+	if (!dev) {
+		dev_err(&op->dev, "Could not allocate device context\n");
+		ret = -ENOMEM;
+		goto out_dispose_irq_mapping;
+	}
+
+	dev->driver = &mpc52xx_uart_driver;
+	dev->label = "rtserPSC%d";
+	dev->device_data = port;
+
+	rt_mpc52xx_uart_init_hw(port);
+
+	ret = rtdm_dev_register(dev);
+	if (ret)
+		goto out_kfree_dev;
+
+	dev_set_drvdata(&op->dev, dev);
+
+	dev_info(&op->dev, "%s on PSC%d at 0x%p, irq=%d, clk=%i\n",
+		 dev->name, port->num, port->psc, port->irq,
+		 port->uartclk);
+
+	return 0;
+
+out_kfree_dev:
+	kfree(dev);
+out_dispose_irq_mapping:
+	irq_dispose_mapping(port->irq);
+out_iounmap:
+	iounmap(port->psc);
+out_release_mem_region:
+	release_mem_region(res.start, resource_size(&res));
+out_kfree_port:
+	kfree(port);
+
+	return ret;
+}
+
+static int rt_mpc52xx_uart_of_remove(struct platform_device *op)
+{
+	struct rtdm_device *dev = dev_get_drvdata(&op->dev);
+	struct rt_mpc52xx_uart_port *port = dev->device_data;
+	struct resource res;
+
+	dev_set_drvdata(&op->dev, NULL);
+
+	rtdm_dev_unregister(dev);
+	irq_dispose_mapping(port->irq);
+	iounmap(port->psc);
+	if (!of_address_to_resource(op->dev.of_node, 0, &res))
+		release_mem_region(res.start, resource_size(&res));
+	kfree(port);
+	kfree(dev);
+
+	return 0;
+}
+
+static struct of_device_id rt_mpc52xx_uart_of_match[] = {
+	{ .compatible = "fsl,mpc5200b-psc-uart", },
+	{ .compatible = "fsl,mpc5200-psc-uart", },
+	{},
+};
+MODULE_DEVICE_TABLE(of, rt_mpc52xx_uart_of_match);
+
+static struct platform_driver rt_mpc52xx_uart_of_driver = {
+	.probe = rt_mpc52xx_uart_of_probe,
+	.remove	= rt_mpc52xx_uart_of_remove,
+	.driver = {
+		.name = "rt-mpc52xx-psc-uart",
+		.owner = THIS_MODULE,
+		.of_match_table = rt_mpc52xx_uart_of_match,
+	},
+};
+
+static void rt_mpc52xx_uart_of_enumerate(void)
+{
+	struct device_node *np;
+	int idx = 0;
+
+	/* Assign an index to each PSC in the device tree, like the Linux driver does */
+	for_each_matching_node(np, rt_mpc52xx_uart_of_match) {
+		of_node_get(np);
+		rt_mpc52xx_uart_nodes[idx] = np;
+		idx++;
+	}
+}
+
+static int __init rt_mpc52xx_uart_init(void)
+{
+	int ret;
+
+	if (!rtdm_available())
+		return -ENODEV;
+
+	printk(KERN_INFO "RTserial: MPC52xx PSC UART driver\n");
+
+	rt_mpc52xx_uart_of_enumerate();
+
+	ret = platform_driver_register(&rt_mpc52xx_uart_of_driver);
+	if (ret) {
+		printk(KERN_ERR
+		       "%s: Could not register driver (err=%d)\n",
+		       __func__, ret);
+		return ret;
+	}
+
+	return 0;
+}
+
+static void __exit rt_mpc52xx_uart_exit(void)
+{
+	platform_driver_unregister(&rt_mpc52xx_uart_of_driver);
+}
+
+module_init(rt_mpc52xx_uart_init);
+module_exit(rt_mpc52xx_uart_exit);
+++ linux-patched/drivers/xenomai/serial/16550A_io.h	2022-03-21 12:58:31.219871116 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/drivers/xenomai/serial/Makefile	1970-01-01 01:00:00.000000000 +0100
+/*
+ * Copyright (C) 2007 Jan Kiszka <jan.kiszka@web.de>.
+ *
+ * Xenomai is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * Xenomai is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Xenomai; if not, write to the Free Software Foundation,
+ * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+/* Manages the I/O access method of the driver. */
+
+typedef enum { MODE_PIO, MODE_MMIO } io_mode_t;
+
+#if defined(CONFIG_XENO_DRIVERS_16550A_PIO) || \
+    defined(CONFIG_XENO_DRIVERS_16550A_ANY)
+static unsigned long io[MAX_DEVICES];
+module_param_array(io, ulong, NULL, 0400);
+MODULE_PARM_DESC(io, "I/O port addresses of the serial devices");
+#endif /* CONFIG_XENO_DRIVERS_16550A_PIO || CONFIG_XENO_DRIVERS_16550A_ANY */
+
+#if defined(CONFIG_XENO_DRIVERS_16550A_MMIO) || \
+    defined(CONFIG_XENO_DRIVERS_16550A_ANY)
+static unsigned long mem[MAX_DEVICES];
+static void *mapped_io[MAX_DEVICES];
+module_param_array(mem, ulong, NULL, 0400);
+MODULE_PARM_DESC(mem, "I/O memory addresses of the serial devices");
+#endif /* CONFIG_XENO_DRIVERS_16550A_MMIO || CONFIG_XENO_DRIVERS_16550A_ANY */
+
+#ifdef CONFIG_XENO_DRIVERS_16550A_PIO
+
+#define RT_16550_IO_INLINE inline
+
+extern void *mapped_io[]; /* dummy */
+
+static inline unsigned long rt_16550_addr_param(int dev_id)
+{
+	return io[dev_id];
+}
+
+static inline int rt_16550_addr_param_valid(int dev_id)
+{
+	return 1;
+}
+
+static inline unsigned long rt_16550_base_addr(int dev_id)
+{
+	return io[dev_id];
+}
+
+static inline io_mode_t rt_16550_io_mode(int dev_id)
+{
+	return MODE_PIO;
+}
+
+static inline io_mode_t
+rt_16550_io_mode_from_ctx(struct rt_16550_context *ctx)
+{
+	return MODE_PIO;
+}
+
+static inline void
+rt_16550_init_io_ctx(int dev_id, struct rt_16550_context *ctx)
+{
+	ctx->base_addr = io[dev_id];
+}
+
+#elif defined(CONFIG_XENO_DRIVERS_16550A_MMIO)
+
+#define RT_16550_IO_INLINE inline
+
+extern unsigned long io[]; /* dummy */
+
+static inline unsigned long rt_16550_addr_param(int dev_id)
+{
+	return mem[dev_id];
+}
+
+static inline int rt_16550_addr_param_valid(int dev_id)
+{
+	return 1;
+}
+
+static inline unsigned long rt_16550_base_addr(int dev_id)
+{
+	return (unsigned long)mapped_io[dev_id];
+}
+
+static inline io_mode_t rt_16550_io_mode(int dev_id)
+{
+	return MODE_MMIO;
+}
+
+static inline io_mode_t
+rt_16550_io_mode_from_ctx(struct rt_16550_context *ctx)
+{
+	return MODE_MMIO;
+}
+
+static inline void
+rt_16550_init_io_ctx(int dev_id, struct rt_16550_context *ctx)
+{
+	ctx->base_addr = (unsigned long)mapped_io[dev_id];
+}
+
+#elif defined(CONFIG_XENO_DRIVERS_16550A_ANY)
+
+#define RT_16550_IO_INLINE /* uninline */
+
+static inline unsigned long rt_16550_addr_param(int dev_id)
+{
+	return (io[dev_id]) ? io[dev_id] : mem[dev_id];
+}
+
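+/* A device slot is valid unless both "io" and "mem" were given for it. */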
+static inline int rt_16550_addr_param_valid(int dev_id)
+{
+	return !(io[dev_id] && mem[dev_id]);
+}
+
+static inline unsigned long rt_16550_base_addr(int dev_id)
+{
+	return (io[dev_id]) ? io[dev_id] : (unsigned long)mapped_io[dev_id];
+}
+
+static inline io_mode_t rt_16550_io_mode(int dev_id)
+{
+	return (io[dev_id]) ? MODE_PIO : MODE_MMIO;
+}
+
+static inline io_mode_t
+rt_16550_io_mode_from_ctx(struct rt_16550_context *ctx)
+{
+	return ctx->io_mode;
+}
+
+static inline void
+rt_16550_init_io_ctx(int dev_id, struct rt_16550_context *ctx)
+{
+	if (io[dev_id]) {
+		ctx->base_addr = io[dev_id];
+		ctx->io_mode   = MODE_PIO;
+	} else {
+		ctx->base_addr = (unsigned long)mapped_io[dev_id];
+		ctx->io_mode   = MODE_MMIO;
+	}
+}
+
+#else
+# error Unsupported I/O access method
+#endif
+
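+/* Register accessors: dispatch to port I/O or MMIO according to the access mode. */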
+static RT_16550_IO_INLINE u8
+rt_16550_reg_in(io_mode_t io_mode, unsigned long base, int off)
+{
+	switch (io_mode) {
+	case MODE_PIO:
+		return inb(base + off);
+	default: /* MODE_MMIO */
+		return readb((void *)base + off);
+	}
+}
+
+static RT_16550_IO_INLINE void
+rt_16550_reg_out(io_mode_t io_mode, unsigned long base, int off, u8 val)
+{
+	switch (io_mode) {
+	case MODE_PIO:
+		outb(val, base + off);
+		break;
+	case MODE_MMIO:
+		writeb(val, (void *)base + off);
+		break;
+	}
+}
+
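+/*
+ * Claim the device's register window: reserve the 8-byte port range for
+ * PIO, or remap the 8-byte register block for MMIO.
+ */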
+static int rt_16550_init_io(int dev_id, char* name)
+{
+	switch (rt_16550_io_mode(dev_id)) {
+	case MODE_PIO:
+		if (!request_region(rt_16550_addr_param(dev_id), 8, name))
+			return -EBUSY;
+		break;
+	case MODE_MMIO:
+		mapped_io[dev_id] = ioremap(rt_16550_addr_param(dev_id), 8);
+		if (!mapped_io[dev_id])
+			return -EBUSY;
+		break;
+	}
+	return 0;
+}
+
+static void rt_16550_release_io(int dev_id)
+{
+	switch (rt_16550_io_mode(dev_id)) {
+	case MODE_PIO:
+		release_region(io[dev_id], 8);
+		break;
+	case MODE_MMIO:
+		iounmap(mapped_io[dev_id]);
+		break;
+	}
+}
+++ linux-patched/drivers/xenomai/serial/Makefile	2022-03-21 12:58:31.212871185 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/drivers/xenomai/analogy/transfer.c	1970-01-01 01:00:00.000000000 +0100
+
+obj-$(CONFIG_XENO_DRIVERS_16550A) += xeno_16550A.o
+obj-$(CONFIG_XENO_DRIVERS_MPC52XX_UART) += xeno_mpc52xx_uart.o
+obj-$(CONFIG_XENO_DRIVERS_IMX_UART) += xeno_imx_uart.o
+
+xeno_16550A-y := 16550A.o
+xeno_mpc52xx_uart-y := mpc52xx_uart.o
+xeno_imx_uart-y := rt_imx_uart.o
+++ linux-patched/drivers/xenomai/analogy/transfer.c	2022-03-21 12:58:31.204871263 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/drivers/xenomai/analogy/Kconfig	1970-01-01 01:00:00.000000000 +0100
+/*
+ * Analogy for Linux, transfer related features
+ *
+ * Copyright (C) 1997-2000 David A. Schleef <ds@schleef.org>
+ * Copyright (C) 2008 Alexis Berlemont <alexis.berlemont@free.fr>
+ *
+ * Xenomai is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * Xenomai is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Xenomai; if not, write to the Free Software Foundation,
+ * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+#include <linux/module.h>
+#include <linux/fs.h>
+#include <asm/errno.h>
+#include <rtdm/analogy/device.h>
+
+#include "proc.h"
+
+/* --- Initialization / cleanup / cancel functions --- */
+
+int a4l_precleanup_transfer(struct a4l_device_context * cxt)
+{
+	struct a4l_device *dev;
+	struct a4l_transfer *tsf;
+	int i, err = 0;
+
+	dev = a4l_get_dev(cxt);
+	tsf = &dev->transfer;
+
+	if (tsf == NULL) {
+		__a4l_err("a4l_precleanup_transfer: "
+			  "incoherent status, transfer block not reachable\n");
+		return -ENODEV;
+	}
+
+	for (i = 0; i < tsf->nb_subd; i++) {
+		unsigned long *status = &tsf->subds[i]->status;
+
+		__a4l_dbg(1, core_dbg, "subd[%d]->status=0x%08lx\n", i, *status);
+
+		if (test_and_set_bit(A4L_SUBD_BUSY, status)) {
+			__a4l_err("a4l_precleanup_transfer: "
+				  "device busy, acquisition occurring\n");
+			err = -EBUSY;
+			goto out_error;
+		} else
+			set_bit(A4L_SUBD_CLEAN, status);
+	}
+
+	return 0;
+
+out_error:
+	for (i = 0; i < tsf->nb_subd; i++) {
+		unsigned long *status = &tsf->subds[i]->status;
+
+		if (test_bit(A4L_SUBD_CLEAN, status)){
+			clear_bit(A4L_SUBD_BUSY, status);
+			clear_bit(A4L_SUBD_CLEAN, status);
+		}
+	}
+
+	return err;
+}
+
+int a4l_cleanup_transfer(struct a4l_device_context * cxt)
+{
+	struct a4l_device *dev;
+	struct a4l_transfer *tsf;
+
+	dev = a4l_get_dev(cxt);
+	tsf = &dev->transfer;
+
+	/* Release the pointer array, if need be */
+	if (tsf->subds != NULL) {
+		rtdm_free(tsf->subds);
+	}
+
+	memset(tsf, 0, sizeof(struct a4l_transfer));
+
+	return 0;
+}
+
+void a4l_presetup_transfer(struct a4l_device_context *cxt)
+{
+	struct a4l_device *dev = NULL;
+	struct a4l_transfer *tsf;
+
+	dev = a4l_get_dev(cxt);
+	tsf = &dev->transfer;
+
+	/* Clear the structure */
+	memset(tsf, 0, sizeof(struct a4l_transfer));
+
+	tsf->default_bufsize = A4L_BUF_DEFSIZE;
+
+	/* 0 is also considered a valid IRQ, so the IRQ number
+	   must be initialized with another value */
+	tsf->irq_desc.irq = A4L_IRQ_UNUSED;
+}
+
+int a4l_setup_transfer(struct a4l_device_context * cxt)
+{
+	struct a4l_device *dev = NULL;
+	struct a4l_transfer *tsf;
+	struct list_head *this;
+	int i = 0, ret = 0;
+
+	dev = a4l_get_dev(cxt);
+	tsf = &dev->transfer;
+
+	/* Retrieve the subdevice count
+	   (the subdevices are registered in a linked list) */
+	list_for_each(this, &dev->subdvsq) {
+		tsf->nb_subd++;
+	}
+
+	__a4l_dbg(1, core_dbg, "nb_subd=%d\n", tsf->nb_subd);
+
+	/* Allocate a suitable array for the subdevice pointers */
+	tsf->subds = rtdm_malloc(tsf->nb_subd * sizeof(struct a4l_subdevice *));
+	if (tsf->subds == NULL) {
+		__a4l_err("a4l_setup_transfer: subdevice array allocation failed\n");
+		ret = -ENOMEM;
+		goto out_setup_tsf;
+	}
+
+	/* Retrieve the subdevice pointers */
+	list_for_each(this, &dev->subdvsq) {
+		tsf->subds[i++] = list_entry(this, struct a4l_subdevice, list);
+	}
+
+out_setup_tsf:
+
+	if (ret != 0)
+		a4l_cleanup_transfer(cxt);
+
+	return ret;
+}
+
+/* --- IRQ handling section --- */
+
+int a4l_request_irq(struct a4l_device * dev,
+		    unsigned int irq,
+		    a4l_irq_hdlr_t handler,
+		    unsigned long flags, void *cookie)
+{
+	int ret;
+
+	if (dev->transfer.irq_desc.irq != A4L_IRQ_UNUSED)
+		return -EBUSY;
+
+	ret = __a4l_request_irq(&dev->transfer.irq_desc, irq, handler, flags,
+		cookie);
+	if (ret != 0) {
+		__a4l_err("a4l_request_irq: IRQ registration failed\n");
+		dev->transfer.irq_desc.irq = A4L_IRQ_UNUSED;
+	}
+
+	return ret;
+}
+
+int a4l_free_irq(struct a4l_device * dev, unsigned int irq)
+{
+
+	int ret = 0;
+
+	if (dev->transfer.irq_desc.irq != irq)
+		return -EINVAL;
+
+	/* There is less need for a spinlock here
+	   than in a4l_request_irq() */
+	ret = __a4l_free_irq(&dev->transfer.irq_desc);
+
+	if (ret == 0)
+		dev->transfer.irq_desc.irq = A4L_IRQ_UNUSED;
+
+	return ret;
+}
+
+unsigned int a4l_get_irq(struct a4l_device * dev)
+{
+	return dev->transfer.irq_desc.irq;
+}
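+
+/*
+ * Typical usage sketch (illustrative, not taken from a particular board
+ * driver): an attach routine hooks its handler with
+ *
+ *	err = a4l_request_irq(dev, irq, my_handler, flags, dev);
+ *
+ * and the matching detach path releases it with
+ *
+ *	a4l_free_irq(dev, irq);
+ *
+ * where my_handler and flags stand in for the driver's own
+ * a4l_irq_hdlr_t routine and whatever interrupt flags it needs.
+ */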
+
+/* --- Proc section --- */
+
+#ifdef CONFIG_PROC_FS
+
+int a4l_rdproc_transfer(struct seq_file *seq, void *v)
+{
+	struct a4l_transfer *transfer = (struct a4l_transfer *) seq->private;
+	int i;
+
+	if (v != SEQ_START_TOKEN)
+		return -EINVAL;
+
+	seq_printf(seq, "--  Subdevices --\n\n");
+	seq_printf(seq, "| idx | type\n");
+
+	/* Print each subdevice type's name */
+	for (i = 0; i < transfer->nb_subd; i++) {
+		char *type;
+		switch (transfer->subds[i]->flags & A4L_SUBD_TYPES) {
+		case A4L_SUBD_UNUSED:
+			type = "Unused subdevice";
+			break;
+		case A4L_SUBD_AI:
+			type = "Analog input subdevice";
+			break;
+		case A4L_SUBD_AO:
+			type = "Analog output subdevice";
+			break;
+		case A4L_SUBD_DI:
+			type = "Digital input subdevice";
+			break;
+		case A4L_SUBD_DO:
+			type = "Digital output subdevice";
+			break;
+		case A4L_SUBD_DIO:
+			type = "Digital input/output subdevice";
+			break;
+		case A4L_SUBD_COUNTER:
+			type = "Counter subdevice";
+			break;
+		case A4L_SUBD_TIMER:
+			type = "Timer subdevice";
+			break;
+		case A4L_SUBD_MEMORY:
+			type = "Memory subdevice";
+			break;
+		case A4L_SUBD_CALIB:
+			type = "Calibration subdevice";
+			break;
+		case A4L_SUBD_PROC:
+			type = "Processor subdevice";
+			break;
+		case A4L_SUBD_SERIAL:
+			type = "Serial subdevice";
+			break;
+		default:
+			type = "Unknown subdevice";
+		}
+
+		seq_printf(seq, "|  %02d | %s\n", i, type);
+	}
+
+	return 0;
+}
+
+#endif /* CONFIG_PROC_FS */
+++ linux-patched/drivers/xenomai/analogy/Kconfig	2022-03-21 12:58:31.197871331 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/drivers/xenomai/analogy/testing/fake.c	1970-01-01 01:00:00.000000000 +0100
+menu "ANALOGY drivers"
+
+config XENO_DRIVERS_ANALOGY
+	tristate "ANALOGY interface"
+	help
+
+	ANALOGY is a framework aimed at supporting data acquisition
+	devices.
+
+config XENO_DRIVERS_ANALOGY_DEBUG
+       depends on XENO_DRIVERS_ANALOGY
+       bool "Analogy debug trace"
+       default n
+       help
+
+       Enable debugging traces in Analogy so as to monitor the
+       behaviour of the Analogy core and drivers.
+
+config XENO_DRIVERS_ANALOGY_DEBUG_FTRACE
+       depends on XENO_DRIVERS_ANALOGY_DEBUG
+       bool "Analogy debug ftrace"
+       default n
+       help
+
+       Route the Analogy a4l_dbg and a4l_info statements to /sys/kernel/debug/
+
+config XENO_DRIVERS_ANALOGY_DEBUG_LEVEL
+       depends on XENO_DRIVERS_ANALOGY_DEBUG
+       int "Analogy core debug level threshold"
+       default 0
+       help
+
+       Define the level above which the debugging traces will not be
+       displayed.
+
+       WARNING: this threshold only applies to the Analogy
+       core; it does not affect the drivers.
+
+config XENO_DRIVERS_ANALOGY_DRIVER_DEBUG_LEVEL
+       depends on XENO_DRIVERS_ANALOGY_DEBUG
+       int "Analogy driver debug level threshold"
+       default 0
+       help
+
+       Define the level above which the debugging traces will not be
+       displayed.
+
+       WARNING: this threshold only applies to the Analogy
+       drivers; it does not affect the core.
+
+source "drivers/xenomai/analogy/testing/Kconfig"
+source "drivers/xenomai/analogy/intel/Kconfig"
+source "drivers/xenomai/analogy/national_instruments/Kconfig"
+source "drivers/xenomai/analogy/sensoray/Kconfig"
+
+endmenu
+++ linux-patched/drivers/xenomai/analogy/testing/fake.c	2022-03-21 12:58:31.190871399 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/drivers/xenomai/analogy/testing/Kconfig	1970-01-01 01:00:00.000000000 +0100
+#include <linux/module.h>
+#include <rtdm/analogy/device.h>
+
+#define TASK_PERIOD 1000000
+
+#define AI_SUBD 0
+#define DIO_SUBD 1
+#define AO_SUBD 2
+#define AI2_SUBD 3
+
+#define TRANSFER_SIZE 0x1000
+
+/* --- Driver related structures --- */
+struct fake_priv {
+	/* Attach configuration parameters
+	   (they should be relocated in ai_priv) */
+	unsigned long amplitude_div;
+	unsigned long quanta_cnt;
+
+	/* Task descriptor */
+	rtdm_task_t task;
+
+	/* Statuses of the asynchronous subdevices */
+	int ai_running;
+	int ao_running;
+	int ai2_running;
+};
+
+struct ai_priv {
+
+	/* Specific timing fields */
+	unsigned long scan_period_ns;
+	unsigned long convert_period_ns;
+	unsigned long current_ns;
+	unsigned long reminder_ns;
+	unsigned long long last_ns;
+
+	/* Misc fields */
+	unsigned long amplitude_div;
+	unsigned long quanta_cnt;
+};
+
+struct ao_ai2_priv {
+	/* Asynchronous loop stuff */
+	uint8_t buffer[TRANSFER_SIZE];
+	int count;
+	/* Synchronous loop stuff */
+	uint16_t insn_value;
+};
+
+struct dio_priv {
+	/* Bits status */
+	uint16_t bits_values;
+};
+
+/* --- Channels / ranges part --- */
+
+/* Channels descriptors */
+
+static struct a4l_channels_desc analog_chandesc = {
+	.mode = A4L_CHAN_GLOBAL_CHANDESC,
+	.length = 8,
+	.chans = {
+		{A4L_CHAN_AREF_GROUND, 16},
+	},
+};
+
+static struct a4l_channels_desc dio_chandesc = {
+	.mode = A4L_CHAN_GLOBAL_CHANDESC,
+	.length = 16,
+	.chans = {
+		{A4L_CHAN_AREF_GROUND, 1},
+	},
+};
+
+/* Ranges tab */
+static struct a4l_rngtab analog_rngtab = {
+	.length = 2,
+	.rngs = {
+		RANGE_V(-5,5),
+		RANGE_V(-10,10),
+	},
+};
+/* Ranges descriptor */
+static struct a4l_rngdesc analog_rngdesc = RNG_GLOBAL(analog_rngtab);
+
+/* Command options masks */
+
+static struct a4l_cmd_desc ai_cmd_mask = {
+	.idx_subd = 0,
+	.start_src = TRIG_NOW,
+	.scan_begin_src = TRIG_TIMER,
+	.convert_src = TRIG_NOW | TRIG_TIMER,
+	.scan_end_src = TRIG_COUNT,
+	.stop_src = TRIG_COUNT | TRIG_NONE,
+};
+
+static struct a4l_cmd_desc ao_cmd_mask = {
+	.idx_subd = 0,
+	.start_src = TRIG_NOW | TRIG_INT,
+	.scan_begin_src = TRIG_TIMER,
+	.convert_src = TRIG_NOW | TRIG_TIMER,
+	.scan_end_src = TRIG_COUNT,
+	.stop_src = TRIG_COUNT | TRIG_NONE,
+};
+
+/* --- Analog input simulation --- */
+
+/* --- Values generation for 1st AI --- */
+
+static inline uint16_t ai_value_output(struct ai_priv *priv)
+{
+	static uint16_t output_tab[8] = {
+		0x0001, 0x2000, 0x4000, 0x6000,
+		0x8000, 0xa000, 0xc000, 0xffff
+	};
+	static unsigned int output_idx;
+	static DEFINE_RTDM_LOCK(output_lock);
+
+	unsigned long flags;
+	unsigned int idx;
+
+	rtdm_lock_get_irqsave(&output_lock, flags);
+
+	output_idx += priv->quanta_cnt;
+	if (output_idx >= 8)
+		output_idx -= 8;
+	idx = output_idx;
+
+	rtdm_lock_put_irqrestore(&output_lock, flags);
+
+	return output_tab[idx] / priv->amplitude_div;
+}
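+
+/*
+ * ai_value_output() synthesizes an 8-step staircase waveform: each call
+ * advances the shared static index by quanta_cnt through output_tab[]
+ * and scales the sample down by amplitude_div, so the attach options
+ * directly shape the generated signal.
+ */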
+
+int ai_push_values(struct a4l_subdevice *subd)
+{
+	uint64_t now_ns, elapsed_ns = 0;
+	struct a4l_cmd_desc *cmd;
+	struct ai_priv *priv;
+	int i = 0;
+
+	if (!subd)
+		return -EINVAL;
+
+	priv = (struct ai_priv *)subd->priv;
+
+	cmd = a4l_get_cmd(subd);
+	if (!cmd)
+		return -EPIPE;
+
+	now_ns = a4l_get_time();
+	elapsed_ns += now_ns - priv->last_ns + priv->reminder_ns;
+	priv->last_ns = now_ns;
+
+	while(elapsed_ns >= priv->scan_period_ns) {
+		int j;
+
+		for(j = 0; j < cmd->nb_chan; j++) {
+			uint16_t value = ai_value_output(priv);
+			a4l_buf_put(subd, &value, sizeof(uint16_t));
+		}
+
+		elapsed_ns -= priv->scan_period_ns;
+		i++;
+	}
+
+	priv->current_ns += i * priv->scan_period_ns;
+	priv->reminder_ns = elapsed_ns;
+
+	if (i != 0)
+		a4l_buf_evt(subd, 0);
+
+	return 0;
+}
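+
+/*
+ * ai_push_values() converts the time elapsed since the last call into
+ * whole scans: for every completed scan period it pushes one 16-bit
+ * sample per channel into the asynchronous buffer, keeps the leftover
+ * time in reminder_ns for the next round, and finally wakes up readers
+ * through a4l_buf_evt().
+ */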
+
+/* --- Data retrieval for AO --- */
+
+int ao_pull_values(struct a4l_subdevice *subd)
+{
+	struct ao_ai2_priv *priv = (struct ao_ai2_priv *)subd->priv;
+	int err;
+
+	/* Let's have a look at how many samples are available */
+	priv->count = a4l_buf_count(subd) < TRANSFER_SIZE ?
+		      a4l_buf_count(subd) : TRANSFER_SIZE;
+
+	if (!priv->count)
+		return 0;
+
+	err = a4l_buf_get(subd, priv->buffer, priv->count);
+	if (err < 0) {
+		a4l_err(subd->dev, "ao_get_values: a4l_buf_get failed (err=%d)\n", err);
+		priv->count = 0;
+		return err;
+
+	}
+
+	a4l_info(subd->dev, " %d bytes added to private buffer from async p=%p\n",
+		priv->count, subd->buf->buf);
+
+	a4l_buf_evt(subd, 0);
+
+	return 0;
+}
+
+/* --- Data redirection for 2nd AI (from AO) --- */
+
+int ai2_push_values(struct a4l_subdevice *subd)
+{
+	struct ao_ai2_priv *priv = *((struct ao_ai2_priv **)subd->priv);
+	int err = 0;
+
+	if (priv->count) {
+		err = a4l_buf_put(subd, priv->buffer, priv->count);
+
+		/* If there is no more room in the asynchronous
+		   buffer, data are likely to be dropped; it is just a
+		   test driver, so no need for a trickier mechanism */
+		err = (err == -EAGAIN) ? 0 : err;
+
+		a4l_info(subd->dev, "%d bytes added to async buffer p=%p\n",
+			priv->count, subd->buf->buf);
+
+		priv->count = 0;
+		if (err < 0)
+			a4l_err(subd->dev,
+				"ai2_push_values: "
+				"a4l_buf_put failed (err=%d)\n", err);
+		else
+			a4l_buf_evt(subd, 0);
+	}
+
+	return err;
+}
+
+/* --- Asynchronous AI functions --- */
+
+static int ai_cmd(struct a4l_subdevice *subd, struct a4l_cmd_desc *cmd)
+{
+	struct fake_priv *priv = (struct fake_priv *)subd->dev->priv;
+	struct ai_priv *ai_priv = (struct ai_priv *)subd->priv;
+
+	ai_priv->scan_period_ns = cmd->scan_begin_arg;
+	ai_priv->convert_period_ns = (cmd->convert_src == TRIG_TIMER) ?
+		cmd->convert_arg : 0;
+
+	a4l_dbg(1, drv_dbg, subd->dev, "scan_period=%luns convert_period=%luns\n",
+		ai_priv->scan_period_ns, ai_priv->convert_period_ns);
+
+	ai_priv->last_ns = a4l_get_time();
+
+	ai_priv->current_ns = ((unsigned long)ai_priv->last_ns);
+	ai_priv->reminder_ns = 0;
+
+	priv->ai_running = 1;
+
+	return 0;
+
+}
+
+static int ai_cmdtest(struct a4l_subdevice *subd, struct a4l_cmd_desc *cmd)
+{
+	if (cmd->scan_begin_src == TRIG_TIMER) {
+		if (cmd->scan_begin_arg < 1000)
+			return -EINVAL;
+
+		if (cmd->convert_src == TRIG_TIMER &&
+		    cmd->scan_begin_arg < (cmd->convert_arg * cmd->nb_chan))
+			return -EINVAL;
+	}
+
+	return 0;
+}
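+
+/*
+ * The command test above only enforces two timing constraints, both
+ * when the scan is timer-paced: the scan period must be at least 1 us,
+ * and, when conversions are timed as well, the scan period must cover
+ * all conversions (convert_arg * nb_chan).
+ */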
+
+static void ai_cancel(struct a4l_subdevice *subd)
+{
+	struct fake_priv *priv = (struct fake_priv *)subd->dev->priv;
+
+	priv->ai_running = 0;
+}
+
+static void ai_munge(struct a4l_subdevice *subd, void *buf, unsigned long size)
+{
+	int i;
+
+	for(i = 0; i < size / sizeof(uint16_t); i++)
+		((uint16_t *)buf)[i] += 1;
+}
+
+/* --- Asynchronous AO functions --- */
+
+int ao_cmd(struct a4l_subdevice *subd, struct a4l_cmd_desc *cmd)
+{
+	a4l_info(subd->dev, "(subd=%d)\n", subd->idx);
+	return 0;
+}
+
+int ao_trigger(struct a4l_subdevice *subd, lsampl_t trignum)
+{
+	struct fake_priv *priv = (struct fake_priv *)subd->dev->priv;
+
+	a4l_info(subd->dev, "(subd=%d)\n", subd->idx);
+	priv->ao_running = 1;
+	return 0;
+}
+
+void ao_cancel(struct a4l_subdevice *subd)
+{
+	struct fake_priv *priv = (struct fake_priv *)subd->dev->priv;
+	struct ao_ai2_priv *ao_priv = (struct ao_ai2_priv *)subd->priv;
+	int running;
+
+	a4l_info(subd->dev, "(subd=%d)\n", subd->idx);
+	priv->ao_running = 0;
+
+	running = priv->ai2_running;
+	if (running) {
+		struct a4l_subdevice *ai2_subd =
+			(struct a4l_subdevice *)a4l_get_subd(subd->dev, AI2_SUBD);
+		/* Here, we have not saved the required amount of
+		   data, so we cannot know whether this is the end of
+		   the acquisition; that is why we force it */
+		priv->ai2_running = 0;
+		ao_priv->count = 0;
+
+		a4l_info(subd->dev, "subd %d cancelling subd %d too \n",
+			subd->idx, AI2_SUBD);
+
+		a4l_buf_evt(ai2_subd, A4L_BUF_EOA);
+	}
+}
+
+/* --- Asynchronous 2nd AI functions --- */
+
+int ai2_cmd(struct a4l_subdevice *subd, struct a4l_cmd_desc *cmd)
+{
+	struct fake_priv *priv = (struct fake_priv *)subd->dev->priv;
+
+	a4l_info(subd->dev, "(subd=%d)\n", subd->idx);
+	priv->ai2_running = 1;
+	return 0;
+}
+
+void ai2_cancel(struct a4l_subdevice *subd)
+{
+	struct fake_priv *priv = (struct fake_priv *)subd->dev->priv;
+	struct ao_ai2_priv *ai2_priv = *((struct ao_ai2_priv **)subd->priv);
+
+	int running;
+
+	a4l_info(subd->dev, "(subd=%d)\n", subd->idx);
+	priv->ai2_running = 0;
+
+	running = priv->ao_running;
+	if (running) {
+		struct a4l_subdevice *ao_subd =
+			(struct a4l_subdevice *)a4l_get_subd(subd->dev, AO_SUBD);
+		/* Here, we have not saved the required amount of
+		   data, so we cannot know whether this is the end of
+		   the acquisition; that is why we force it */
+		priv->ao_running = 0;
+		ai2_priv->count = 0;
+
+		a4l_info(subd->dev, "subd %d cancelling subd %d too \n",
+			 subd->idx, AO_SUBD);
+
+		a4l_buf_evt(ao_subd, A4L_BUF_EOA);
+	}
+
+}
+
+
+/* --- Synchronous AI functions --- */
+
+static int ai_insn_read(struct a4l_subdevice *subd, struct a4l_kernel_instruction *insn)
+{
+	struct ai_priv *priv = (struct ai_priv *)subd->priv;
+	uint16_t *data = (uint16_t *)insn->data;
+	int i;
+
+	for(i = 0; i < insn->data_size / sizeof(uint16_t); i++)
+		data[i] = ai_value_output(priv);
+
+	return 0;
+}
+
+/* --- Synchronous DIO function --- */
+
+static int dio_insn_bits(struct a4l_subdevice *subd, struct a4l_kernel_instruction *insn)
+{
+	struct dio_priv *priv = (struct dio_priv *)subd->priv;
+	uint16_t *data = (uint16_t *)insn->data;
+
+	if (insn->data_size != 2 * sizeof(uint16_t))
+		return -EINVAL;
+
+	if (data[0] != 0) {
+		priv->bits_values &= ~(data[0]);
+		priv->bits_values |= (data[0] & data[1]);
+	}
+
+	data[1] = priv->bits_values;
+
+	return 0;
+}
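+
+/*
+ * dio_insn_bits() implements a mask/value protocol: data[0] is the mask
+ * of channels to update, data[1] carries the new values on input and
+ * the current pin state on output. Illustrative sketch: a data pair of
+ * { 0x00ff, 0x005a } rewrites the low byte of bits_values to 0x5a and
+ * returns the full 16-bit state in data[1].
+ */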
+
+/* --- Synchronous AO + AI2 functions --- */
+
+int ao_insn_write(struct a4l_subdevice *subd, struct a4l_kernel_instruction *insn)
+{
+	struct ao_ai2_priv *priv = (struct ao_ai2_priv *)subd->priv;
+	uint16_t *data = (uint16_t *)insn->data;
+
+	/* Checks the buffer size */
+	if (insn->data_size != sizeof(uint16_t))
+		return -EINVAL;
+
+	/* Retrieves the value to memorize */
+	priv->insn_value = data[0];
+
+	return 0;
+}
+
+int ai2_insn_read(struct a4l_subdevice *subd, struct a4l_kernel_instruction *insn)
+{
+	struct ao_ai2_priv *priv = *((struct ao_ai2_priv **)subd->priv);
+	uint16_t *data = (uint16_t *)insn->data;
+
+	/* Checks the buffer size */
+	if (insn->data_size != sizeof(uint16_t))
+		return -EINVAL;
+
+	/* Sets the memorized value */
+	data[0] = priv->insn_value;
+
+	return 0;
+}
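+
+/*
+ * Together, ao_insn_write() and ai2_insn_read() form the synchronous
+ * loopback path: the value written to the AO subdevice is stored in the
+ * shared ao_ai2_priv and handed back verbatim by the next read on AI2.
+ */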
+
+/* --- Global task part --- */
+
+/* One task is enough for all the asynchronous subdevices; it is just a fake
+ * driver after all.
+ */
+static void task_proc(void *arg)
+{
+	struct a4l_subdevice *ai_subd, *ao_subd, *ai2_subd;
+	struct a4l_device *dev;
+	struct fake_priv *priv;
+	int running;
+
+	dev = arg;
+	ai_subd = a4l_get_subd(dev, AI_SUBD);
+	ao_subd = a4l_get_subd(dev, AO_SUBD);
+	ai2_subd = a4l_get_subd(dev, AI2_SUBD);
+
+	priv = dev->priv;
+
+	while(!rtdm_task_should_stop()) {
+
+		/* copy sample static data from the subd private buffer to the
+		 * asynchronous buffer
+		 */
+		running = priv->ai_running;
+		if (running && ai_push_values(ai_subd) < 0) {
+			/* on error, wait for detach to destroy the task */
+			rtdm_task_sleep(RTDM_TIMEOUT_INFINITE);
+			continue;
+		}
+
+		/*
+		 * pull the data from the output subdevice (asynchronous buffer)
+		 * into its private buffer
+		 */
+		running = priv->ao_running;
+		if (running && ao_pull_values(ao_subd) < 0) {
+			rtdm_task_sleep(RTDM_TIMEOUT_INFINITE);
+			continue;
+		}
+
+		running = priv->ai2_running;
+		/*
+		 * then loop it to the ai2 subd since their private data is shared: so
+		 * pull the data from the private buffer back into the device's
+		 * asynchronous buffer
+		 */
+		if (running && ai2_push_values(ai2_subd) < 0) {
+			rtdm_task_sleep(RTDM_TIMEOUT_INFINITE);
+			continue;
+		}
+
+		rtdm_task_sleep(TASK_PERIOD);
+	}
+}
+
+/* --- Initialization functions --- */
+
+void setup_ai_subd(struct a4l_subdevice *subd)
+{
+	/* Fill the subdevice structure */
+	subd->flags |= A4L_SUBD_AI;
+	subd->flags |= A4L_SUBD_CMD;
+	subd->flags |= A4L_SUBD_MMAP;
+	subd->rng_desc = &analog_rngdesc;
+	subd->chan_desc = &analog_chandesc;
+	subd->do_cmd = ai_cmd;
+	subd->do_cmdtest = ai_cmdtest;
+	subd->cancel = ai_cancel;
+	subd->munge = ai_munge;
+	subd->cmd_mask = &ai_cmd_mask;
+	subd->insn_read = ai_insn_read;
+}
+
+void setup_dio_subd(struct a4l_subdevice *subd)
+{
+	/* Fill the subdevice structure */
+	subd->flags |= A4L_SUBD_DIO;
+	subd->chan_desc = &dio_chandesc;
+	subd->rng_desc = &range_digital;
+	subd->insn_bits = dio_insn_bits;
+}
+
+void setup_ao_subd(struct a4l_subdevice *subd)
+{
+	/* Fill the subdevice structure */
+	subd->flags |= A4L_SUBD_AO;
+	subd->flags |= A4L_SUBD_CMD;
+	subd->flags |= A4L_SUBD_MMAP;
+	subd->rng_desc = &analog_rngdesc;
+	subd->chan_desc = &analog_chandesc;
+	subd->do_cmd = ao_cmd;
+	subd->cancel = ao_cancel;
+	subd->trigger = ao_trigger;
+	subd->cmd_mask = &ao_cmd_mask;
+	subd->insn_write = ao_insn_write;
+}
+
+void setup_ai2_subd(struct a4l_subdevice *subd)
+{
+	/* Fill the subdevice structure */
+	subd->flags |= A4L_SUBD_AI;
+	subd->flags |= A4L_SUBD_CMD;
+	subd->flags |= A4L_SUBD_MMAP;
+	subd->rng_desc = &analog_rngdesc;
+	subd->chan_desc = &analog_chandesc;
+	subd->do_cmd = ai2_cmd;
+	subd->cancel = ai2_cancel;
+	subd->cmd_mask = &ai_cmd_mask;
+	subd->insn_read = ai2_insn_read;
+}
+
+/* --- Attach / detach functions ---  */
+
+int test_attach(struct a4l_device *dev, a4l_lnkdesc_t *arg)
+{
+	typedef void (*setup_subd_function) (struct a4l_subdevice *subd);
+	struct fake_priv *priv = (struct fake_priv *) dev->priv;
+	struct a4l_subdevice *subd;
+	unsigned long tmp;
+	struct ai_priv *r;
+	int i, ret = 0;
+
+	struct initializers {
+		struct a4l_subdevice *subd;
+		setup_subd_function init;
+		int private_len;
+		char *name;
+		int index;
+	} sds[] = {
+		[AI_SUBD] = {
+			.name = "AI",
+			.private_len = sizeof(struct ai_priv),
+			.init = setup_ai_subd,
+			.index = AI_SUBD,
+			.subd = NULL,
+		},
+		[DIO_SUBD] = {
+			.name = "DIO",
+			.private_len = sizeof(struct dio_priv),
+			.init = setup_dio_subd,
+			.index = DIO_SUBD,
+			.subd = NULL,
+		},
+		[AO_SUBD] = {
+			.name = "AO",
+			.private_len = sizeof(struct ao_ai2_priv),
+			.init = setup_ao_subd,
+			.index = AO_SUBD,
+			.subd = NULL,
+		},
+		[AI2_SUBD] = {
+			.name = "AI2",
+			.private_len = sizeof(struct ao_ai2_priv *),
+			.init = setup_ai2_subd,
+			.index = AI2_SUBD,
+			.subd = NULL,
+		},
+	};
+
+	a4l_dbg(1, drv_dbg, dev, "starting attach procedure...\n");
+
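+	/*
+	 * The link descriptor may carry up to two unsigned long options:
+	 * args[0] sets the amplitude divisor applied to the generated
+	 * waveform, args[1] the quanta count (step width), which falls
+	 * back to 1 when it is 0 or greater than 7.
+	 */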
+	/* Set default values for attach parameters */
+	priv->amplitude_div = 1;
+	priv->quanta_cnt = 1;
+	if (arg->opts_size) {
+		unsigned long *args = (unsigned long *)arg->opts;
+		priv->amplitude_div = args[0];
+		if (arg->opts_size == 2 * sizeof(unsigned long))
+			priv->quanta_cnt = (args[1] > 7 || args[1] == 0) ?
+				1 : args[1];
+	}
+
+	/* create and register the subdevices */
+	for (i = 0; i < ARRAY_SIZE(sds) ; i++) {
+
+		subd = a4l_alloc_subd(sds[i].private_len, sds[i].init);
+		if (subd == NULL)
+			return -ENOMEM;
+
+		ret = a4l_add_subd(dev, subd);
+		if (ret != sds[i].index)
+			return (ret < 0) ? ret : -EINVAL;
+
+		sds[i].subd = subd;
+
+		a4l_dbg(1, drv_dbg, dev, " %s subdev registered \n", sds[i].name);
+	}
+
+	/* initialize specifics */
+	r = (void *) sds[AI_SUBD].subd->priv;
+	r->amplitude_div = priv->amplitude_div;
+	r->quanta_cnt = priv->quanta_cnt;
+
+	/* AO and AI2 share their private buffer */
+	tmp = (unsigned long) sds[AO_SUBD].subd->priv;
+	memcpy(sds[AI2_SUBD].subd->priv, &tmp, sds[AI2_SUBD].private_len);
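+	/*
+	 * Only a pointer is copied here: the AI2 private area merely
+	 * stores a struct ao_ai2_priv *, which is why ai2_push_values()
+	 * and ai2_insn_read() dereference subd->priv twice.
+	 */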
+
+	/* create the task */
+	ret = rtdm_task_init(&priv->task, "Fake AI task", task_proc, dev,
+		             RTDM_TASK_HIGHEST_PRIORITY, 0);
+	if (ret)
+		a4l_dbg(1, drv_dbg, dev, "Error creating A4L task \n");
+
+	a4l_dbg(1, drv_dbg, dev, "attach procedure completed: "
+				 "adiv = %lu, qcount = %lu\n",
+		priv->amplitude_div, priv->quanta_cnt);
+
+	return ret;
+}
+
+int test_detach(struct a4l_device *dev)
+{
+	struct fake_priv *priv = (struct fake_priv *)dev->priv;
+
+	rtdm_task_destroy(&priv->task);
+	a4l_dbg(1, drv_dbg, dev, "detach procedure complete\n");
+
+	return 0;
+}
+
+/* --- Module stuff --- */
+
+static struct a4l_driver test_drv = {
+	.owner = THIS_MODULE,
+	.board_name = "analogy_fake",
+	.driver_name = "fake",
+	.attach = test_attach,
+	.detach = test_detach,
+	.privdata_size = sizeof(struct fake_priv),
+};
+
+static int __init a4l_fake_init(void)
+{
+	return a4l_register_drv(&test_drv);
+}
+
+static void __exit a4l_fake_cleanup(void)
+{
+	a4l_unregister_drv(&test_drv);
+}
+
+MODULE_DESCRIPTION("Analogy fake driver");
+MODULE_LICENSE("GPL");
+
+module_init(a4l_fake_init);
+module_exit(a4l_fake_cleanup);
+++ linux-patched/drivers/xenomai/analogy/testing/Kconfig	2022-03-21 12:58:31.182871477 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/drivers/xenomai/analogy/testing/Makefile	1970-01-01 01:00:00.000000000 +0100
+
+config XENO_DRIVERS_ANALOGY_FAKE
+	depends on XENO_DRIVERS_ANALOGY
+	tristate "Fake driver"
+	default n
+	help
+
+	The fake driver provides several subdevices:
+	- 0: analog input;
+	- 1: digital input / output;
+	- 2: analog output;
+	- 3: analog input; data written into subdevice 2 can be
+	  read back here.
+++ linux-patched/drivers/xenomai/analogy/testing/Makefile	2022-03-21 12:58:31.175871545 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/drivers/xenomai/analogy/testing/loop.c	1970-01-01 01:00:00.000000000 +0100
+
+ccflags-y += -I$(srctree)/drivers/xenomai/analogy
+
+obj-$(CONFIG_XENO_DRIVERS_ANALOGY_FAKE) += analogy_fake.o
+
+analogy_fake-y := fake.o
+
+analogy_loop-y := loop.o
+++ linux-patched/drivers/xenomai/analogy/testing/loop.c	2022-03-21 12:58:31.167871623 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/drivers/xenomai/analogy/national_instruments/pcimio.c	1970-01-01 01:00:00.000000000 +0100
+#include <linux/module.h>
+#include <rtdm/analogy/device.h>
+
+#define LOOP_TASK_PERIOD 1000000
+#define LOOP_NB_BITS 16
+
+#define LOOP_INPUT_SUBD 0
+#define LOOP_OUTPUT_SUBD 1
+
+/* Channels descriptor */
+static struct a4l_channels_desc loop_chandesc = {
+	.mode = A4L_CHAN_GLOBAL_CHANDESC,
+	.length = 8,
+	.chans = {
+		{A4L_CHAN_AREF_GROUND, LOOP_NB_BITS},
+	},
+};
+
+/* Ranges tab */
+static struct a4l_rngtab loop_rngtab = {
+	.length =  2,
+	.rngs = {
+		RANGE_V(-5,5),
+		RANGE_V(-10,10),
+	},
+};
+/* Ranges descriptor */
+struct a4l_rngdesc loop_rngdesc = RNG_GLOBAL(loop_rngtab);
+
+/* Command options mask */
+static struct a4l_cmd_desc loop_cmd_mask = {
+	.idx_subd = 0,
+	.start_src = TRIG_NOW | TRIG_INT,
+	.scan_begin_src = TRIG_TIMER,
+	.convert_src = TRIG_NOW | TRIG_TIMER,
+	.scan_end_src = TRIG_COUNT,
+	.stop_src = TRIG_COUNT| TRIG_NONE,
+};
+
+/* Private data organization */
+struct loop_priv {
+
+	/* Task descriptor */
+	rtdm_task_t loop_task;
+
+	/* Misc fields */
+	int loop_running;
+	uint16_t loop_insn_value;
+};
+typedef struct loop_priv lpprv_t;
+
+/* Attach arguments contents */
+struct loop_attach_arg {
+	unsigned long period;
+};
+typedef struct loop_attach_arg lpattr_t;
+
+static void loop_task_proc(void *arg);
+
+/* --- Task part --- */
+
+/* Timer task routine  */
+static void loop_task_proc(void *arg)
+{
+	struct a4l_device *dev = (struct a4l_device*)arg;
+	struct a4l_subdevice *input_subd, *output_subd;
+	lpprv_t *priv = (lpprv_t *)dev->priv;
+
+	input_subd = a4l_get_subd(dev, LOOP_INPUT_SUBD);
+	output_subd = a4l_get_subd(dev, LOOP_OUTPUT_SUBD);
+
+	if (input_subd == NULL || output_subd == NULL) {
+		a4l_err(dev, "loop_task_proc: subdevices unavailable\n");
+		return;
+	}
+
+	while (1) {
+
+		int running;
+
+		running = priv->loop_running;
+
+		if (running) {
+			uint16_t value;
+			int ret=0;
+
+			while (ret==0) {
+
+				ret = a4l_buf_get(output_subd,
+						  &value, sizeof(uint16_t));
+				if (ret == 0) {
+
+					a4l_info(dev,
+						 "loop_task_proc: "
+						 "data available\n");
+
+					a4l_buf_evt(output_subd, 0);
+
+					ret = a4l_buf_put(input_subd,
+							  &value,
+							  sizeof(uint16_t));
+
+					if (ret==0)
+						a4l_buf_evt(input_subd, 0);
+				}
+			}
+		}
+
+		rtdm_task_sleep(LOOP_TASK_PERIOD);
+	}
+}
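+
+/*
+ * loop_task_proc() implements the loopback proper: while loop_running
+ * is set, every sample found in the output subdevice's asynchronous
+ * buffer is drained with a4l_buf_get() and immediately re-injected into
+ * the input subdevice with a4l_buf_put(), so data written to
+ * subdevice 1 can be read back from subdevice 0.
+ */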
+
+/* --- Analogy Callbacks --- */
+
+/* Command callback */
+int loop_cmd(struct a4l_subdevice *subd, struct a4l_cmd_desc *cmd)
+{
+	a4l_info(subd->dev, "loop_cmd: (subd=%d)\n", subd->idx);
+
+	return 0;
+
+}
+
+/* Trigger callback */
+int loop_trigger(struct a4l_subdevice *subd, lsampl_t trignum)
+{
+	lpprv_t *priv = (lpprv_t *)subd->dev->priv;
+
+	a4l_info(subd->dev, "loop_trigger: (subd=%d)\n", subd->idx);
+
+	priv->loop_running = 1;
+
+	return 0;
+}
+
+/* Cancel callback */
+void loop_cancel(struct a4l_subdevice *subd)
+{
+	lpprv_t *priv = (lpprv_t *)subd->dev->priv;
+
+	a4l_info(subd->dev, "loop_cancel: (subd=%d)\n", subd->idx);
+
+	priv->loop_running = 0;
+}
+
+/* Read instruction callback */
+int loop_insn_read(struct a4l_subdevice *subd, struct a4l_kernel_instruction *insn)
+{
+	lpprv_t *priv = (lpprv_t*)subd->dev->priv;
+	uint16_t *data = (uint16_t *)insn->data;
+
+	/* Checks the buffer size */
+	if (insn->data_size != sizeof(uint16_t))
+		return -EINVAL;
+
+	/* Sets the memorized value */
+	data[0] = priv->loop_insn_value;
+
+	return 0;
+}
+
+/* Write instruction callback */
+int loop_insn_write(struct a4l_subdevice *subd, struct a4l_kernel_instruction *insn)
+{
+	lpprv_t *priv = (lpprv_t*)subd->dev->priv;
+	uint16_t *data = (uint16_t *)insn->data;
+
+	/* Checks the buffer size */
+	if (insn->data_size != sizeof(uint16_t))
+		return -EINVAL;
+
+	/* Retrieves the value to memorize */
+	priv->loop_insn_value = data[0];
+
+	return 0;
+}
+
+void setup_input_subd(struct a4l_subdevice *subd)
+{
+	memset(subd, 0, sizeof(struct a4l_subdevice));
+
+	subd->flags |= A4L_SUBD_AI;
+	subd->flags |= A4L_SUBD_CMD;
+	subd->flags |= A4L_SUBD_MMAP;
+	subd->rng_desc = &loop_rngdesc;
+	subd->chan_desc = &loop_chandesc;
+	subd->do_cmd = loop_cmd;
+	subd->cancel = loop_cancel;
+	subd->cmd_mask = &loop_cmd_mask;
+	subd->insn_read = loop_insn_read;
+	subd->insn_write = loop_insn_write;
+}
+
+void setup_output_subd(struct a4l_subdevice *subd)
+{
+	memset(subd, 0, sizeof(struct a4l_subdevice));
+
+	subd->flags = A4L_SUBD_AO;
+	subd->flags |= A4L_SUBD_CMD;
+	subd->flags |= A4L_SUBD_MMAP;
+	subd->rng_desc = &loop_rngdesc;
+	subd->chan_desc = &loop_chandesc;
+	subd->do_cmd = loop_cmd;
+	subd->cancel = loop_cancel;
+	subd->trigger = loop_trigger;
+	subd->cmd_mask = &loop_cmd_mask;
+	subd->insn_read = loop_insn_read;
+	subd->insn_write = loop_insn_write;
+}
+
+/* Attach callback */
+int loop_attach(struct a4l_device *dev,
+		a4l_lnkdesc_t *arg)
+{
+	int ret = 0;
+	struct a4l_subdevice *subd;
+	lpprv_t *priv = (lpprv_t *)dev->priv;
+
+	/* Add the fake input subdevice */
+	subd = a4l_alloc_subd(0, setup_input_subd);
+	if (subd == NULL)
+		return -ENOMEM;
+
+	ret = a4l_add_subd(dev, subd);
+	if (ret != LOOP_INPUT_SUBD)
+		/* Let Analogy free the just-allocated subdevice */
+		return (ret < 0) ? ret : -EINVAL;
+
+	/* Add the fake output subdevice */
+	subd = a4l_alloc_subd(0, setup_output_subd);
+	if (subd == NULL)
+		/* Let Analogy free the just-allocated subdevice */
+		return -ENOMEM;
+
+	ret = a4l_add_subd(dev, subd);
+	if (ret != LOOP_OUTPUT_SUBD)
+		/* Let Analogy free the just-allocated subdevices */
+		return (ret < 0) ? ret : -EINVAL;
+
+	priv->loop_running = 0;
+	priv->loop_insn_value = 0;
+
+	ret = rtdm_task_init(&priv->loop_task,
+			    "a4l_loop task",
+			    loop_task_proc,
+			    dev, RTDM_TASK_HIGHEST_PRIORITY, 0);
+
+	return ret;
+}
+
+/* Detach callback */
+int loop_detach(struct a4l_device *dev)
+{
+	lpprv_t *priv = (lpprv_t *)dev->priv;
+
+	rtdm_task_destroy(&priv->loop_task);
+
+	return 0;
+}
+
+/* --- Module part --- */
+
+static struct a4l_driver loop_drv = {
+	.owner = THIS_MODULE,
+	.board_name = "analogy_loop",
+	.attach = loop_attach,
+	.detach = loop_detach,
+	.privdata_size = sizeof(lpprv_t),
+};
+
+static int __init a4l_loop_init(void)
+{
+	return a4l_register_drv(&loop_drv);
+}
+
+static void __exit a4l_loop_cleanup(void)
+{
+	a4l_unregister_drv(&loop_drv);
+}
+
+MODULE_DESCRIPTION("Analogy loop driver");
+MODULE_LICENSE("GPL");
+
+module_init(a4l_loop_init);
+module_exit(a4l_loop_cleanup);
+++ linux-patched/drivers/xenomai/analogy/national_instruments/pcimio.c	2022-03-21 12:58:31.160871692 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/drivers/xenomai/analogy/national_instruments/ni_mio.h	1970-01-01 01:00:00.000000000 +0100
+/*
+ * Hardware driver for NI PCI-MIO E series cards
+ *
+ * Copyright (C) 1997-8 David A. Schleef <ds@schleef.org>
+ *
+ * This code is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published
+ * by the Free Software Foundation; either version 2 of the License,
+ * or (at your option) any later version.
+ *
+ * This code is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Xenomai; if not, write to the Free Software Foundation,
+ * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ * Description: National Instruments PCI-MIO-E series and M series
+ * (all boards)
+ *
+ * Author: ds, John Hallen, Frank Mori Hess, Rolf Mueller, Herbert Peremans,
+ * Herman Bruyninckx, Terry Barnaby
+ * Status: works
+ * Devices: [National Instruments] PCI-MIO-16XE-50 (ni_pcimio),
+ * PCI-MIO-16XE-10, PXI-6030E, PCI-MIO-16E-1, PCI-MIO-16E-4, PCI-6014,
+ * PCI-6040E,PXI-6040E, PCI-6030E, PCI-6031E, PCI-6032E, PCI-6033E,
+ * PCI-6071E, PCI-6023E, PCI-6024E, PCI-6025E, PXI-6025E, PCI-6034E,
+ * PCI-6035E, PCI-6052E, PCI-6110, PCI-6111, PCI-6220, PCI-6221,
+ * PCI-6224, PCI-6225, PCI-6229, PCI-6250, PCI-6251, PCIe-6251,
+ * PCI-6254, PCI-6259, PCIe-6259, PCI-6280, PCI-6281, PXI-6281,
+ * PCI-6284, PCI-6289, PCI-6711, PXI-6711, PCI-6713, PXI-6713,
+ * PXI-6071E, PCI-6070E, PXI-6070E, PXI-6052E, PCI-6036E, PCI-6731,
+ * PCI-6733, PXI-6733, PCI-6143, PXI-6143
+ *
+ * These boards are almost identical to the AT-MIO E series, except that
+ * they use the PCI bus instead of ISA (i.e., AT).  See the notes for
+ * the ni_atmio.o driver for additional information about these boards.
+ *
+ * By default, the driver uses DMA to transfer analog input data to
+ * memory.  When DMA is enabled, not all triggering features are
+ * supported.
+ *
+ * Note that the PCI-6143 is a simultaneous sampling device with 8
+ * converters. With this board, all of the converters perform one
+ * simultaneous sample during a scan interval. The period for a scan
+ * is used for the convert time in an Analogy cmd. The convert trigger
+ * source is normally set to TRIG_NOW by default.
+ *
+ * The RTSI trigger bus is supported on these cards on subdevice
+ * 10. See the Analogy library documentation for details.
+ *
+ * References:
+ * 341079b.pdf  PCI E Series Register-Level Programmer Manual
+ * 340934b.pdf  DAQ-STC reference manual
+ * 322080b.pdf  6711/6713/6715 User Manual
+ * 320945c.pdf  PCI E Series User Manual
+ * 322138a.pdf  PCI-6052E and DAQPad-6052E User Manual
+ *
+ * ISSUES:
+ * - When DMA is enabled, XXX_EV_CONVERT does not work correctly.
+ * - Calibration is not fully implemented
+ * - SCXI is probably broken for m-series boards
+ * - Digital I/O may not work on 673x.
+ * - Information (number of channels, bits, etc.) for some devices may
+ *   be incorrect.  Please check this and submit a bug if there are
+ *   problems for your device.
+ * - Need to deal with external reference for DAC, and other DAC
+ *   properties in board properties
+ * - Deal with at-mio-16de-10 revision D to N changes, etc.
+ * - Need to add other CALDAC type
+ * - Need to slow down DAC loading.  I don't trust NI's claim that two
+ *   writes to the PCI bus slows IO enough.  I would prefer to use
+ *   a4l_udelay().  Timing specs: (clock)
+ *     AD8522   30ns
+ *     DAC8043  120ns
+ *     DAC8800  60ns
+ *     MB88341   ?
+ *
+ */
+
+#include <linux/module.h>
+#include <rtdm/analogy/device.h>
+
+#include "../intel/8255.h"
+#include "ni_stc.h"
+#include "ni_mio.h"
+#include "mite.h"
+
+#define PCIMIO_IRQ_POLARITY 1
+
+/* The following two tables must be in the same order */
+static struct pci_device_id ni_pci_table[] __maybe_unused = {
+	{ PCI_VENDOR_ID_NATINST, 0x0162, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
+	{ PCI_VENDOR_ID_NATINST, 0x1170, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
+	{ PCI_VENDOR_ID_NATINST, 0x1180, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
+	{ PCI_VENDOR_ID_NATINST, 0x1190, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
+	{ PCI_VENDOR_ID_NATINST, 0x11b0, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
+	{ PCI_VENDOR_ID_NATINST, 0x11c0, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
+	{ PCI_VENDOR_ID_NATINST, 0x11d0, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
+	{ PCI_VENDOR_ID_NATINST, 0x1270, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
+	{ PCI_VENDOR_ID_NATINST, 0x1330, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
+	{ PCI_VENDOR_ID_NATINST, 0x1340, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
+	{ PCI_VENDOR_ID_NATINST, 0x1350, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
+	{ PCI_VENDOR_ID_NATINST, 0x14e0, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
+	{ PCI_VENDOR_ID_NATINST, 0x14f0, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
+	{ PCI_VENDOR_ID_NATINST, 0x1580, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
+	{ PCI_VENDOR_ID_NATINST, 0x15b0, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
+	{ PCI_VENDOR_ID_NATINST, 0x1880, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
+	{ PCI_VENDOR_ID_NATINST, 0x1870, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
+	{ PCI_VENDOR_ID_NATINST, 0x18b0, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
+	{ PCI_VENDOR_ID_NATINST, 0x18c0, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
+	{ PCI_VENDOR_ID_NATINST, 0x2410, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
+	{ PCI_VENDOR_ID_NATINST, 0x2420, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
+	{ PCI_VENDOR_ID_NATINST, 0x2430, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
+	{ PCI_VENDOR_ID_NATINST, 0x2890, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
+	{ PCI_VENDOR_ID_NATINST, 0x28c0, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
+	{ PCI_VENDOR_ID_NATINST, 0x2a60, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
+	{ PCI_VENDOR_ID_NATINST, 0x2a70, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
+	{ PCI_VENDOR_ID_NATINST, 0x2a80, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
+	{ PCI_VENDOR_ID_NATINST, 0x2ab0, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
+	{ PCI_VENDOR_ID_NATINST, 0x2b80, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
+	{ PCI_VENDOR_ID_NATINST, 0x2b90, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
+	{ PCI_VENDOR_ID_NATINST, 0x2c80, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
+	{ PCI_VENDOR_ID_NATINST, 0x2ca0, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
+	{ PCI_VENDOR_ID_NATINST, 0x70aa, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
+	{ PCI_VENDOR_ID_NATINST, 0x70ab, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
+	{ PCI_VENDOR_ID_NATINST, 0x70ac, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
+	{ PCI_VENDOR_ID_NATINST, 0x70af, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
+	{ PCI_VENDOR_ID_NATINST, 0x70b0, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
+	{ PCI_VENDOR_ID_NATINST, 0x70b4, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
+	{ PCI_VENDOR_ID_NATINST, 0x70b6, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
+	{ PCI_VENDOR_ID_NATINST, 0x70b7, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
+	{ PCI_VENDOR_ID_NATINST, 0x70b8, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
+	{ PCI_VENDOR_ID_NATINST, 0x70bc, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
+	{ PCI_VENDOR_ID_NATINST, 0x70bd, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
+	{ PCI_VENDOR_ID_NATINST, 0x70bf, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
+	{ PCI_VENDOR_ID_NATINST, 0x70c0, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
+	{ PCI_VENDOR_ID_NATINST, 0x70f2, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
+	{ PCI_VENDOR_ID_NATINST, 0x710d, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
+	{ PCI_VENDOR_ID_NATINST, 0x716c, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
+	{ PCI_VENDOR_ID_NATINST, 0x717f, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
+	{ PCI_VENDOR_ID_NATINST, 0x71bc, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
+	{ PCI_VENDOR_ID_NATINST, 0x717d, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
+	{ 0 }
+};
+MODULE_DEVICE_TABLE(pci, ni_pci_table);
+
+/* These are not all the possible ao ranges for 628x boards.
+ * They can do OFFSET +- REFERENCE where OFFSET can be
+ * 0V, 5V, APFI<0,1>, or AO<0...3> and RANGE can
+ * be 10V, 5V, 2V, 1V, APFI<0,1>, AO<0...3>.  That's
+ * 63 different possibilities.  An AO channel
+ * cannot act as its own OFFSET or REFERENCE.
+ */
+
+#if 0
+static struct a4l_rngtab rng_ni_M_628x_ao = { 8, {
+	RANGE(-10, 10),
+	RANGE(-5, 5),
+	RANGE(-2, 2),
+	RANGE(-1, 1),
+	RANGE(-5, 15),
+	RANGE(0, 10),
+	RANGE(3, 7),
+	RANGE(4, 6),
+	RANGE_ext(-1, 1)
+}};
+static struct a4l_rngdesc range_ni_M_628x_ao =
+	RNG_GLOBAL(rng_ni_M_628x_ao);
+#endif
+
+static struct a4l_rngtab rng_ni_M_625x_ao = { 3, {
+	RANGE(-10, 10),
+	RANGE(-5, 5),
+	RANGE_ext(-1, 1)
+}};
+static struct a4l_rngdesc range_ni_M_625x_ao =
+	RNG_GLOBAL(rng_ni_M_625x_ao);
+
+static struct a4l_rngtab rng_ni_M_622x_ao = { 1, {
+	RANGE(-10, 10),
+}};
+static struct a4l_rngdesc range_ni_M_622x_ao =
+	RNG_GLOBAL(rng_ni_M_622x_ao);
+
+static ni_board ni_boards[]={
+	{       device_id:      0x0162, // NI also says 0x1620.  typo?
+		name:           "pci-mio-16xe-50",
+		n_adchan:       16,
+		adbits:         16,
+		ai_fifo_depth:  2048,
+		alwaysdither:   1,
+		gainlkup:       ai_gain_8,
+		ai_speed:	50000,
+		n_aochan:       2,
+		aobits:         12,
+		ao_fifo_depth:  0,
+		.ao_range_table = &a4l_range_bipolar10,
+		ao_unipolar:    0,
+		ao_speed:	50000,
+		.num_p0_dio_channels = 8,
+		caldac:         {dac8800,dac8043},
+		has_8255:       0,
+	},
+	{       device_id:      0x1170,
+		name:           "pci-mio-16xe-10", // aka pci-6030E
+		n_adchan:       16,
+		adbits:         16,
+		ai_fifo_depth:  512,
+		alwaysdither:   1,
+		gainlkup:       ai_gain_14,
+		ai_speed:	10000,
+		n_aochan:       2,
+		aobits:         16,
+		ao_fifo_depth:  2048,
+		.ao_range_table = &a4l_range_ni_E_ao_ext,
+		ao_unipolar:    1,
+		ao_speed:	10000,
+		.num_p0_dio_channels = 8,
+		caldac:         {dac8800,dac8043,ad8522},
+		has_8255:       0,
+	},
+	{	device_id:      0x28c0,
+		name:           "pci-6014",
+		n_adchan:       16,
+		adbits:         16,
+		ai_fifo_depth:  512,
+		alwaysdither:   1,
+		gainlkup:       ai_gain_4,
+		ai_speed:       5000,
+		n_aochan:       2,
+		aobits:         16,
+		ao_fifo_depth:  0,
+		.ao_range_table = &a4l_range_bipolar10,
+		ao_unipolar:    0,
+		ao_speed:	100000,
+		.num_p0_dio_channels = 8,
+		caldac:         {ad8804_debug},
+		has_8255:       0,
+	},
+	{       device_id:      0x11d0,
+		name:           "pxi-6030e",
+		n_adchan:       16,
+		adbits:         16,
+		ai_fifo_depth:  512,
+		alwaysdither:   1,
+		gainlkup:       ai_gain_14,
+		ai_speed:	10000,
+		n_aochan:       2,
+		aobits:         16,
+		ao_fifo_depth:  2048,
+		.ao_range_table = &a4l_range_ni_E_ao_ext,
+		ao_unipolar:    1,
+		ao_speed:	10000,
+		.num_p0_dio_channels = 8,
+		caldac:         {dac8800,dac8043,ad8522},
+		has_8255:       0,
+	},
+
+	{       device_id:      0x1180,
+		name:           "pci-mio-16e-1",	/* aka pci-6070e */
+		n_adchan:       16,
+		adbits:         12,
+		ai_fifo_depth:  512,
+		alwaysdither:   0,
+		gainlkup:       ai_gain_16,
+		ai_speed:	800,
+		n_aochan:       2,
+		aobits:         12,
+		ao_fifo_depth:  2048,
+		.ao_range_table = &a4l_range_ni_E_ao_ext,
+		ao_unipolar:    1,
+		ao_speed:	1000,
+		.num_p0_dio_channels = 8,
+		caldac:         {mb88341},
+		has_8255:       0,
+	},
+	{       device_id:      0x1190,
+		name:           "pci-mio-16e-4", /* aka pci-6040e */
+		n_adchan:       16,
+		adbits:         12,
+		ai_fifo_depth:  512,
+		alwaysdither:   0,
+		gainlkup:       ai_gain_16,
+		/* Note: there have been reported problems with full speed
+		 * on this board */
+		ai_speed:	2000,
+		n_aochan:       2,
+		aobits:         12,
+		ao_fifo_depth:  512,
+		.ao_range_table = &a4l_range_ni_E_ao_ext,
+		ao_unipolar:    1,
+		ao_speed:	1000,
+		.num_p0_dio_channels = 8,
+		caldac:         {ad8804_debug}, // doc says mb88341
+		has_8255:       0,
+	},
+	{       device_id:      0x11c0,
+		name:           "pxi-6040e",
+		n_adchan:       16,
+		adbits:         12,
+		ai_fifo_depth:  512,
+		alwaysdither:   0,
+		gainlkup:       ai_gain_16,
+		ai_speed:	2000,
+		n_aochan:       2,
+		aobits:         12,
+		ao_fifo_depth:  512,
+		.ao_range_table = &a4l_range_ni_E_ao_ext,
+		ao_unipolar:    1,
+		ao_speed:	1000,
+		.num_p0_dio_channels = 8,
+		caldac:         {mb88341},
+		has_8255:       0,
+	},
+
+	{       device_id:      0x1330,
+		name:           "pci-6031e",
+		n_adchan:       64,
+		adbits:         16,
+		ai_fifo_depth:  512,
+		alwaysdither:   1,
+		gainlkup:       ai_gain_14,
+		ai_speed:	10000,
+		n_aochan:       2,
+		aobits:         16,
+		ao_fifo_depth:  2048,
+		.ao_range_table = &a4l_range_ni_E_ao_ext,
+		ao_unipolar:    1,
+		ao_speed:	10000,
+		.num_p0_dio_channels = 8,
+		caldac:         {dac8800,dac8043,ad8522},
+		has_8255:       0,
+	},
+	{       device_id:      0x1270,
+		name:           "pci-6032e",
+		n_adchan:       16,
+		adbits:         16,
+		ai_fifo_depth:  512,
+		alwaysdither:   1,
+		gainlkup:       ai_gain_14,
+		ai_speed:	10000,
+		n_aochan:       0,
+		aobits:         0,
+		ao_fifo_depth:  0,
+		ao_unipolar:    0,
+		.num_p0_dio_channels = 8,
+		caldac:         {dac8800,dac8043,ad8522},
+		has_8255:       0,
+	},
+	{       device_id:      0x1340,
+		name:           "pci-6033e",
+		n_adchan:       64,
+		adbits:         16,
+		ai_fifo_depth:  512,
+		alwaysdither:   1,
+		gainlkup:       ai_gain_14,
+		ai_speed:	10000,
+		n_aochan:       0,
+		aobits:         0,
+		ao_fifo_depth:  0,
+		ao_unipolar:    0,
+		.num_p0_dio_channels = 8,
+		caldac:         {dac8800,dac8043,ad8522},
+		has_8255:       0,
+	},
+	{       device_id:      0x1350,
+		name:           "pci-6071e",
+		n_adchan:       64,
+		adbits:         12,
+		ai_fifo_depth:  512,
+		alwaysdither:   1,
+		gainlkup:       ai_gain_16,
+		ai_speed:	800,
+		n_aochan:       2,
+		aobits:         12,
+		ao_fifo_depth:  2048,
+		.ao_range_table = &a4l_range_ni_E_ao_ext,
+		ao_unipolar:    1,
+		ao_speed:	1000,
+		.num_p0_dio_channels = 8,
+		caldac:         {ad8804_debug},
+		has_8255:       0,
+	},
+	{       device_id:      0x2a60,
+		name:           "pci-6023e",
+		n_adchan:       16,
+		adbits:         12,
+		ai_fifo_depth:  512,
+		alwaysdither:   0,
+		gainlkup:       ai_gain_4,
+		ai_speed:	5000,
+		n_aochan:       0,
+		aobits:         0,
+		ao_unipolar:    0,
+		.num_p0_dio_channels = 8,
+		caldac:         {ad8804_debug}, /* manual is wrong */
+		has_8255:	0,
+	},
+	{       device_id:      0x2a70,
+		name:           "pci-6024e",
+		n_adchan:       16,
+		adbits:         12,
+		ai_fifo_depth:  512,
+		alwaysdither:   0,
+		gainlkup:       ai_gain_4,
+		ai_speed:	5000,
+		n_aochan:       2,
+		aobits:         12,
+		ao_fifo_depth:  0,
+		.ao_range_table = &a4l_range_bipolar10,
+		ao_unipolar:    0,
+		ao_speed:	100000,
+		.num_p0_dio_channels = 8,
+		caldac:         {ad8804_debug}, /* manual is wrong */
+		has_8255:	0,
+	},
+	{       device_id:      0x2a80,
+		name:           "pci-6025e",
+		n_adchan:       16,
+		adbits:         12,
+		ai_fifo_depth:  512,
+		alwaysdither:   0,
+		gainlkup:       ai_gain_4,
+		ai_speed:	5000,
+		n_aochan:       2,
+		aobits:         12,
+		ao_fifo_depth:  0,
+		.ao_range_table = &a4l_range_bipolar10,
+		ao_unipolar:    0,
+		ao_speed:	100000,
+		.num_p0_dio_channels = 8,
+		caldac:         {ad8804_debug}, /* manual is wrong */
+		has_8255:	1,
+	},
+	{       device_id:      0x2ab0,
+		name:           "pxi-6025e",
+		n_adchan:       16,
+		adbits:         12,
+		ai_fifo_depth:  512,
+		alwaysdither:   0,
+		gainlkup:       ai_gain_4,
+		ai_speed:	5000,
+		n_aochan:       2,
+		aobits:         12,
+		ao_fifo_depth:  0,
+		.ao_range_table = &a4l_range_ni_E_ao_ext,
+		ao_unipolar:    1,
+		ao_speed:	100000,
+		.num_p0_dio_channels = 8,
+		caldac:         {ad8804_debug}, /* manual is wrong */
+		has_8255:	1,
+	},
+
+	{       device_id:      0x2ca0,
+		name:           "pci-6034e",
+		n_adchan:       16,
+		adbits:         16,
+		ai_fifo_depth:  512,
+		alwaysdither:   1,
+		gainlkup:       ai_gain_4,
+		ai_speed:	5000,
+		n_aochan:       0,
+		aobits:         0,
+		ao_fifo_depth:  0,
+		ao_unipolar:    0,
+		.num_p0_dio_channels = 8,
+		caldac:         {ad8804_debug},
+		has_8255:	0,
+	},
+	{       device_id:      0x2c80,
+		name:           "pci-6035e",
+		n_adchan:       16,
+		adbits:         16,
+		ai_fifo_depth:  512,
+		alwaysdither:   1,
+		gainlkup:       ai_gain_4,
+		ai_speed:	5000,
+		n_aochan:       2,
+		aobits:         12,
+		ao_fifo_depth:  0,
+		.ao_range_table = &a4l_range_bipolar10,
+		ao_unipolar:    0,
+		ao_speed:	100000,
+		.num_p0_dio_channels = 8,
+		caldac:         {ad8804_debug},
+		has_8255:	0,
+	},
+	{       device_id:      0x18b0,
+		name:           "pci-6052e",
+		n_adchan:       16,
+		adbits:         16,
+		ai_fifo_depth:  512,
+		alwaysdither:   1,
+		gainlkup:       ai_gain_16,
+		ai_speed:	3000,
+		n_aochan:       2,
+		aobits:         16,
+		ao_unipolar:    1,
+		ao_fifo_depth:  2048,
+		.ao_range_table = &a4l_range_ni_E_ao_ext,
+		ao_speed:	3000,
+		.num_p0_dio_channels = 8,
+		caldac:         {ad8804_debug,ad8804_debug,ad8522}, /* manual is wrong */
+	},
+	{       device_id:      0x14e0,
+		name:           "pci-6110",
+		n_adchan:       4,
+		adbits:         12,
+		ai_fifo_depth:  8192,
+		alwaysdither:   0,
+		gainlkup:       ai_gain_611x,
+		ai_speed:	200,
+		n_aochan:       2,
+		aobits:         16,
+		reg_type:	ni_reg_611x,
+		.ao_range_table = &a4l_range_bipolar10,
+		ao_unipolar:    0,
+		ao_fifo_depth:  2048,
+		ao_speed:	250,
+		.num_p0_dio_channels = 8,
+		caldac:         {ad8804,ad8804},
+	},
+	{       device_id:      0x14f0,
+		name:           "pci-6111",
+		n_adchan:       2,
+		adbits:         12,
+		ai_fifo_depth:  8192,
+		alwaysdither:   0,
+		gainlkup:       ai_gain_611x,
+		ai_speed:	200,
+		n_aochan:       2,
+		aobits:         16,
+		reg_type:	ni_reg_611x,
+		.ao_range_table = &a4l_range_bipolar10,
+		ao_unipolar:    0,
+		ao_fifo_depth:  2048,
+		ao_speed:	250,
+		.num_p0_dio_channels = 8,
+		caldac:         {ad8804,ad8804},
+	},
+#if 0 /* Need device IDs */
+	/* The 6115 boards probably need their own driver */
+	{       device_id:      0x2ed0,
+		name:           "pci-6115",
+		n_adchan:       4,
+		adbits:         12,
+		ai_fifo_depth:  8192,
+		alwaysdither:   0,
+		gainlkup:       ai_gain_611x,
+		ai_speed:	100,
+		n_aochan:       2,
+		aobits:         16,
+		ao_671x:	1,
+		ao_unipolar:    0,
+		ao_fifo_depth:  2048,
+		ao_speed:	250,
+		.num_p0_dio_channels = 8,
+		reg_611x:	1,
+		caldac:         {ad8804_debug,ad8804_debug,ad8804_debug},/* XXX */
+	},
+#endif
+#if 0 /* Need device IDs */
+	{       device_id:      0x0000,
+		name:           "pxi-6115",
+		n_adchan:       4,
+		adbits:         12,
+		ai_fifo_depth:  8192,
+		alwaysdither:   0,
+		gainlkup:       ai_gain_611x,
+		ai_speed:	100,
+		n_aochan:       2,
+		aobits:         16,
+		ao_671x:	1,
+		ao_unipolar:    0,
+		ao_fifo_depth:  2048,
+		ao_speed:	250,
+		reg_611x:	1,
+		.num_p0_dio_channels = 8,
+		caldac:         {ad8804_debug,ad8804_debug,ad8804_debug},/* XXX */
+	},
+#endif
+	{       device_id:      0x1880,
+		name:           "pci-6711",
+		n_adchan:       0, /* no analog input */
+		n_aochan:	4,
+		aobits:         12,
+		ao_unipolar:    0,
+		ao_fifo_depth:  16384, /* data sheet says 8192, but fifo really holds 16384 samples */
+		.ao_range_table = &a4l_range_bipolar10,
+		ao_speed:	1000,
+		.num_p0_dio_channels = 8,
+		reg_type:	ni_reg_6711,
+		caldac:         {ad8804_debug},
+	},
+	{       device_id:      0x2b90,
+		name:           "pxi-6711",
+		n_adchan:       0, /* no analog input */
+		n_aochan:	4,
+		aobits:         12,
+		ao_unipolar:    0,
+		ao_fifo_depth:  16384,
+		.ao_range_table = &a4l_range_bipolar10,
+		ao_speed:	1000,
+		.num_p0_dio_channels = 8,
+		reg_type:	ni_reg_6711,
+		caldac:         {ad8804_debug},
+	},
+	{       device_id:      0x1870,
+		name:           "pci-6713",
+		n_adchan:       0, /* no analog input */
+		n_aochan:	8,
+		aobits:         12,
+		ao_unipolar:    0,
+		ao_fifo_depth:  16384,
+		.ao_range_table = &a4l_range_bipolar10,
+		ao_speed:	1000,
+		.num_p0_dio_channels = 8,
+		reg_type:	ni_reg_6713,
+		caldac:         {ad8804_debug,ad8804_debug},
+	},
+	{       device_id:      0x2b80,
+		name:           "pxi-6713",
+		n_adchan:       0, /* no analog input */
+		n_aochan:	8,
+		aobits:         12,
+		ao_unipolar:    0,
+		ao_fifo_depth:  16384,
+		.ao_range_table = &a4l_range_bipolar10,
+		ao_speed:	1000,
+		.num_p0_dio_channels = 8,
+		reg_type:	ni_reg_6713,
+		caldac:         {ad8804_debug,ad8804_debug},
+	},
+	{	device_id:	0x2430,
+		name:           "pci-6731",
+		n_adchan:       0, /* no analog input */
+		n_aochan:	4,
+		aobits:         16,
+		ao_unipolar:    0,
+		ao_fifo_depth:  8192,
+		.ao_range_table = &a4l_range_bipolar10,
+		ao_speed:	1000,
+		.num_p0_dio_channels = 8,
+		reg_type:	ni_reg_6711,
+		caldac:         {ad8804_debug},
+	},
+#if 0	/* Need device IDs */
+	{       device_id:      0x0,
+		name:           "pxi-6731",
+		n_adchan:       0, /* no analog input */
+		n_aochan:	4,
+		aobits:         16,
+		ao_unipolar:    0,
+		ao_fifo_depth:  8192,
+		.ao_range_table = &a4l_range_bipolar10,
+		.num_p0_dio_channels = 8,
+		reg_type:	ni_reg_6711,
+		caldac:         {ad8804_debug},
+	},
+#endif
+	{       device_id:      0x2410,
+		name:           "pci-6733",
+		n_adchan:       0, /* no analog input */
+		n_aochan:	8,
+		aobits:         16,
+		ao_unipolar:    0,
+		ao_fifo_depth:  16384,
+		.ao_range_table = &a4l_range_bipolar10,
+		ao_speed:	1000,
+		.num_p0_dio_channels = 8,
+		reg_type:	ni_reg_6713,
+		caldac:         {ad8804_debug,ad8804_debug},
+	},
+	{       device_id:      0x2420,
+		name:           "pxi-6733",
+		n_adchan:       0, /* no analog input */
+		n_aochan:	8,
+		aobits:         16,
+		ao_unipolar:    0,
+		ao_fifo_depth:  16384,
+		.ao_range_table = &a4l_range_bipolar10,
+		ao_speed:	1000,
+		.num_p0_dio_channels = 8,
+		reg_type:	ni_reg_6713,
+		caldac:         {ad8804_debug,ad8804_debug},
+	},
+	{	device_id:      0x15b0,
+		name:           "pxi-6071e",
+		n_adchan:       64,
+		adbits:         12,
+		ai_fifo_depth:  512,
+		alwaysdither:   1,
+		gainlkup:       ai_gain_16,
+		ai_speed:       800,
+		n_aochan:       2,
+		aobits:         12,
+		ao_fifo_depth:  2048,
+		.ao_range_table = &a4l_range_ni_E_ao_ext,
+		ao_unipolar:    1,
+		ao_speed:	1000,
+		.num_p0_dio_channels = 8,
+		caldac:         {ad8804_debug},
+		has_8255:       0,
+	},
+	{	device_id:      0x11b0,
+		name:           "pxi-6070e",
+		n_adchan:       16,
+		adbits:         12,
+		ai_fifo_depth:  512,
+		alwaysdither:   1,
+		gainlkup:       ai_gain_16,
+		ai_speed:       800,
+		n_aochan:       2,
+		aobits:         12,
+		ao_fifo_depth:  2048,
+		.ao_range_table = &a4l_range_ni_E_ao_ext,
+		ao_unipolar:    1,
+		ao_speed:	1000,
+		.num_p0_dio_channels = 8,
+		caldac:         {ad8804_debug},
+		has_8255:       0,
+	},
+	{	device_id:      0x18c0,
+		name:           "pxi-6052e",
+		n_adchan:       16,
+		adbits:         16,
+		ai_fifo_depth:  512,
+		alwaysdither:   1,
+		gainlkup:       ai_gain_16,
+		ai_speed:	3000,
+		n_aochan:       2,
+		aobits:         16,
+		ao_unipolar:    1,
+		ao_fifo_depth:  2048,
+		.ao_range_table = &a4l_range_ni_E_ao_ext,
+		ao_speed:	3000,
+		.num_p0_dio_channels = 8,
+		caldac:         {mb88341,mb88341,ad8522},
+	},
+	{	device_id:      0x1580,
+		name:           "pxi-6031e",
+		n_adchan:       64,
+		adbits:         16,
+		ai_fifo_depth:  512,
+		alwaysdither:   1,
+		gainlkup:       ai_gain_14,
+		ai_speed:	10000,
+		n_aochan:       2,
+		aobits:         16,
+		ao_fifo_depth:  2048,
+		.ao_range_table = &a4l_range_ni_E_ao_ext,
+		ao_unipolar:    1,
+		ao_speed:	10000,
+		.num_p0_dio_channels = 8,
+		caldac:         {dac8800,dac8043,ad8522},
+	},
+	{	device_id:      0x2890,
+		name:           "pci-6036e",
+		n_adchan:       16,
+		adbits:         16,
+		ai_fifo_depth:  512,
+		alwaysdither:   1,
+		gainlkup:       ai_gain_4,
+		ai_speed:	5000,
+		n_aochan:       2,
+		aobits:         16,
+		ao_fifo_depth:  0,
+		.ao_range_table = &a4l_range_bipolar10,
+		ao_unipolar:    0,
+		ao_speed:	100000,
+		.num_p0_dio_channels = 8,
+		caldac:         {ad8804_debug},
+		has_8255:	0,
+	},
+	{	device_id:      0x70b0,
+		name:           "pci-6220",
+		n_adchan:       16,
+		adbits:         16,
+		ai_fifo_depth:  512,	//FIXME: guess
+		gainlkup:       ai_gain_622x,
+		ai_speed:	4000,
+		n_aochan:       0,
+		aobits:         0,
+		ao_fifo_depth:  0,
+		.num_p0_dio_channels = 8,
+		reg_type:	ni_reg_622x,
+		ao_unipolar:    0,
+		.caldac = {caldac_none},
+		has_8255:	0,
+	},
+	{	device_id:      0x70af,
+		name:           "pci-6221",
+		n_adchan:       16,
+		adbits:         16,
+		ai_fifo_depth:  4095,
+		gainlkup:       ai_gain_622x,
+		ai_speed:	4000,
+		n_aochan:       2,
+		aobits:         16,
+		ao_fifo_depth:  8191,
+		.ao_range_table = &a4l_range_bipolar10,
+		reg_type:	ni_reg_622x,
+		ao_unipolar:    0,
+		ao_speed:	1200,
+		.num_p0_dio_channels = 8,
+		.caldac = {caldac_none},
+		has_8255:	0,
+	},
+	{	device_id:      0x71bc,
+		name:           "pci-6221_37pin",
+		n_adchan:       16,
+		adbits:         16,
+		ai_fifo_depth:  4095,
+		gainlkup:       ai_gain_622x,
+		ai_speed:	4000,
+		n_aochan:       2,
+		aobits:         16,
+		ao_fifo_depth:  8191,
+		.ao_range_table = &a4l_range_bipolar10,
+		reg_type:	ni_reg_622x,
+		ao_unipolar:    0,
+		ao_speed:	1200,
+		.num_p0_dio_channels = 8,
+		.caldac = {caldac_none},
+		has_8255:	0,
+	},
+	{	device_id:      0x70f2,
+		name:           "pci-6224",
+		n_adchan:       32,
+		adbits:         16,
+		ai_fifo_depth:  4095,
+		gainlkup:       ai_gain_622x,
+		ai_speed:	4000,
+		n_aochan:       0,
+		aobits:         0,
+		ao_fifo_depth:  0,
+		reg_type:	ni_reg_622x,
+		ao_unipolar:    0,
+		.num_p0_dio_channels = 32,
+		.caldac = {caldac_none},
+		has_8255:	0,
+	},
+	{	device_id:      0x716c,
+		name:           "pci-6225",
+		n_adchan:       80,
+		adbits:         16,
+		ai_fifo_depth:  4095,
+		gainlkup:       ai_gain_622x,
+		ai_speed:	4000,
+		n_aochan:       2,
+		aobits:         16,
+		ao_fifo_depth:  8191,
+		.ao_range_table = &range_ni_M_622x_ao,
+		reg_type:	ni_reg_622x,
+		ao_unipolar:    0,
+		ao_speed:	1200,
+		.num_p0_dio_channels = 32,
+		.caldac = {caldac_none},
+		has_8255:	0,
+	},
+	{	device_id:      0x70aa,
+		name:           "pci-6229",
+		n_adchan:       32,
+		adbits:         16,
+		ai_fifo_depth:  4095,
+		gainlkup:       ai_gain_622x,
+		ai_speed:	4000,
+		n_aochan:       4,
+		aobits:         16,
+		ao_fifo_depth:  8191,
+		.ao_range_table = &range_ni_M_622x_ao,
+		reg_type:	ni_reg_622x,
+		ao_unipolar:    0,
+		ao_speed:	1200,
+		.num_p0_dio_channels = 32,
+		.caldac = {caldac_none},
+		has_8255:	0,
+	},
+	{	device_id:      0x70b4,
+		name:           "pci-6250",
+		n_adchan:       16,
+		adbits:         16,
+		ai_fifo_depth:  4095,
+		.gainlkup = ai_gain_628x,
+		ai_speed:	800,
+		n_aochan:       0,
+		aobits:         0,
+		ao_fifo_depth:  0,
+		reg_type:	ni_reg_625x,
+		ao_unipolar:    0,
+		.num_p0_dio_channels = 8,
+		.caldac = {caldac_none},
+		has_8255:	0,
+	},
+	{	device_id:      0x70b8,
+		name:           "pci-6251",
+		n_adchan:       16,
+		adbits:         16,
+		ai_fifo_depth:  4095,
+		.gainlkup = ai_gain_628x,
+		ai_speed:	800,
+		n_aochan:       2,
+		aobits:         16,
+		ao_fifo_depth:  8191,
+		.ao_range_table = &range_ni_M_625x_ao,
+		reg_type:	ni_reg_625x,
+		ao_unipolar:    0,
+		ao_speed:	357,
+		.num_p0_dio_channels = 8,
+		.caldac = {caldac_none},
+		has_8255:	0,
+	},
+	{	device_id:      0x717d,
+		name:           "pcie-6251",
+		n_adchan:       16,
+		adbits:         16,
+		ai_fifo_depth:  4095,
+		.gainlkup = ai_gain_628x,
+		ai_speed:	800,
+		n_aochan:       2,
+		aobits:         16,
+		ao_fifo_depth:  8191,
+		.ao_range_table = &range_ni_M_625x_ao,
+		reg_type:	ni_reg_625x,
+		ao_unipolar:    0,
+		ao_speed:	357,
+		.num_p0_dio_channels = 8,
+		.caldac = {caldac_none},
+		has_8255:	0,
+	},
+	{	device_id:      0x70b7,
+		name:           "pci-6254",
+		n_adchan:       32,
+		adbits:         16,
+		ai_fifo_depth:  4095,
+		.gainlkup = ai_gain_628x,
+		ai_speed:	800,
+		n_aochan:       0,
+		aobits:         0,
+		ao_fifo_depth:  0,
+		reg_type:	ni_reg_625x,
+		ao_unipolar:    0,
+		.num_p0_dio_channels = 32,
+		.caldac = {caldac_none},
+		has_8255:	0,
+	},
+	{	device_id:      0x70ab,
+		name:           "pci-6259",
+		n_adchan:       32,
+		adbits:         16,
+		ai_fifo_depth:  4095,
+		.gainlkup = ai_gain_628x,
+		ai_speed:	800,
+		n_aochan:       4,
+		aobits:         16,
+		ao_fifo_depth:  8191,
+		.ao_range_table = &range_ni_M_625x_ao,
+		reg_type:	ni_reg_625x,
+		ao_unipolar:    0,
+		ao_speed:	357,
+		.num_p0_dio_channels = 32,
+		.caldac = {caldac_none},
+		has_8255:	0,
+	},
+	{	device_id:      0x717f,
+		name:           "pcie-6259",
+		n_adchan:       32,
+		adbits:         16,
+		ai_fifo_depth:  4095,
+		.gainlkup = ai_gain_628x,
+		ai_speed:	800,
+		n_aochan:       4,
+		aobits:         16,
+		ao_fifo_depth:  8191,
+		.ao_range_table = &range_ni_M_625x_ao,
+		reg_type:	ni_reg_625x,
+		ao_unipolar:    0,
+		ao_speed:	357,
+		.num_p0_dio_channels = 32,
+		.caldac = {caldac_none},
+		has_8255:	0,
+	},
+#if 0 /* TODO: fix data size */
+	{	device_id:      0x70b6,
+		name:           "pci-6280",
+		n_adchan:       16,
+		adbits:         18,
+		ai_fifo_depth:  2047,
+		.gainlkup = ai_gain_628x,
+		ai_speed:	1600,
+		n_aochan:       0,
+		aobits:         0,
+		ao_fifo_depth:  8191,
+		reg_type:	ni_reg_628x,
+		ao_unipolar:    0,
+		.num_p0_dio_channels = 8,
+		.caldac = {caldac_none},
+		has_8255:	0,
+	},
+	{	device_id:      0x70bd,
+		name:           "pci-6281",
+		n_adchan:       16,
+		adbits:         18,
+		ai_fifo_depth:  2047,
+		.gainlkup = ai_gain_628x,
+		ai_speed:	1600,
+		n_aochan:       2,
+		aobits:         16,
+		ao_fifo_depth:  8191,
+		.ao_range_table = &range_ni_M_628x_ao,
+		reg_type:	ni_reg_628x,
+		ao_unipolar:    1,
+		ao_speed:	357,
+		.num_p0_dio_channels = 8,
+		.caldac = {caldac_none},
+		has_8255:	0,
+	},
+	{	device_id:      0x70bf,
+		name:           "pxi-6281",
+		n_adchan:       16,
+		adbits:         18,
+		ai_fifo_depth:  2047,
+		.gainlkup = ai_gain_628x,
+		ai_speed:	1600,
+		n_aochan:       2,
+		aobits:         16,
+		ao_fifo_depth:  8191,
+		.ao_range_table = &range_ni_M_628x_ao,
+		reg_type:	ni_reg_628x,
+		ao_unipolar:    1,
+		ao_speed:	357,
+		.num_p0_dio_channels = 8,
+		.caldac = {caldac_none},
+		has_8255:	0,
+	},
+	{	device_id:      0x70bc,
+		name:           "pci-6284",
+		n_adchan:       32,
+		adbits:         18,
+		ai_fifo_depth:  2047,
+		.gainlkup = ai_gain_628x,
+		ai_speed:	1600,
+		n_aochan:       0,
+		aobits:         0,
+		ao_fifo_depth:  0,
+		reg_type:	ni_reg_628x,
+		ao_unipolar:    0,
+		.num_p0_dio_channels = 32,
+		.caldac = {caldac_none},
+		has_8255:	0,
+	},
+	{	device_id:      0x70ac,
+		name:           "pci-6289",
+		n_adchan:       32,
+		adbits:         18,
+		ai_fifo_depth:  2047,
+		.gainlkup = ai_gain_628x,
+		ai_speed:	1600,
+		n_aochan:       4,
+		aobits:         16,
+		ao_fifo_depth:  8191,
+		.ao_range_table = &range_ni_M_628x_ao,
+		reg_type:	ni_reg_628x,
+		ao_unipolar:    1,
+		ao_speed:	357,
+		.num_p0_dio_channels = 32,
+		.caldac = {caldac_none},
+		has_8255:	0,
+	},
+#endif /* TODO: fix data size */
+	{	device_id:      0x70C0,
+		name:           "pci-6143",
+		n_adchan:       8,
+		adbits:         16,
+		ai_fifo_depth:  1024,
+		alwaysdither:   0,
+		gainlkup:       ai_gain_6143,
+		ai_speed:	4000,
+		n_aochan:       0,
+		aobits:         0,
+		reg_type:	ni_reg_6143,
+		ao_unipolar:    0,
+		ao_fifo_depth:  0,
+		.num_p0_dio_channels = 8,
+		.caldac = {ad8804_debug,ad8804_debug},
+	},
+	{	device_id:      0x710D,
+		name:           "pxi-6143",
+		n_adchan:       8,
+		adbits:         16,
+		ai_fifo_depth:  1024,
+		alwaysdither:   0,
+		gainlkup:       ai_gain_6143,
+		ai_speed:	4000,
+		n_aochan:       0,
+		aobits:         0,
+		reg_type:	ni_reg_6143,
+		ao_unipolar:    0,
+		ao_fifo_depth:  0,
+		.num_p0_dio_channels = 8,
+		.caldac = {ad8804_debug,ad8804_debug},
+	},
+};
+#define n_pcimio_boards ((sizeof(ni_boards)/sizeof(ni_boards[0])))
+
+/* How we access STC registers */
+
+/* We automatically take advantage of STC registers that can be
+ * read/written directly in the I/O space of the board.  Most
+ * PCIMIO devices map the low 8 STC registers to iobase+addr*2.
+ * The 611x devices map the write registers to iobase+addr*2, and
+ * the read registers to iobase+(addr-1)*2. */
+/* However, the 611x boards still aren't working, so I'm disabling
+ * non-windowed STC access temporarily */
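+/* Illustration only: with direct (non-windowed) access, writing STC
+ * register <addr> would target I/O offset <addr * 2> on most boards,
+ * while a 611x read of <addr> would target offset <(addr - 1) * 2>.
+ * Since that path is disabled, the windowed helpers below are used. */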
+
+static void e_series_win_out(struct a4l_device *dev, uint16_t data, int reg)
+{
+	unsigned long flags;
+
+	rtdm_lock_get_irqsave(&devpriv->window_lock, flags);
+	ni_writew(reg, Window_Address);
+	ni_writew(data, Window_Data);
+	rtdm_lock_put_irqrestore(&devpriv->window_lock, flags);
+}
+
+static uint16_t e_series_win_in(struct a4l_device *dev, int reg)
+{
+	unsigned long flags;
+	uint16_t ret;
+
+	rtdm_lock_get_irqsave(&devpriv->window_lock, flags);
+	ni_writew(reg, Window_Address);
+	ret = ni_readw(Window_Data);
+	rtdm_lock_put_irqrestore(&devpriv->window_lock,flags);
+
+	return ret;
+}
+
+static void m_series_stc_writew(struct a4l_device *dev, uint16_t data, int reg)
+{
+	unsigned offset;
+	switch(reg)
+	{
+	case ADC_FIFO_Clear:
+		offset = M_Offset_AI_FIFO_Clear;
+		break;
+	case AI_Command_1_Register:
+		offset = M_Offset_AI_Command_1;
+		break;
+	case AI_Command_2_Register:
+		offset = M_Offset_AI_Command_2;
+		break;
+	case AI_Mode_1_Register:
+		offset = M_Offset_AI_Mode_1;
+		break;
+	case AI_Mode_2_Register:
+		offset = M_Offset_AI_Mode_2;
+		break;
+	case AI_Mode_3_Register:
+		offset = M_Offset_AI_Mode_3;
+		break;
+	case AI_Output_Control_Register:
+		offset = M_Offset_AI_Output_Control;
+		break;
+	case AI_Personal_Register:
+		offset = M_Offset_AI_Personal;
+		break;
+	case AI_SI2_Load_A_Register:
+		/* This is actually a 32 bit register on m series boards */
+		ni_writel(data, M_Offset_AI_SI2_Load_A);
+		return;
+		break;
+	case AI_SI2_Load_B_Register:
+		/* This is actually a 32 bit register on m series boards */
+		ni_writel(data, M_Offset_AI_SI2_Load_B);
+		return;
+		break;
+	case AI_START_STOP_Select_Register:
+		offset = M_Offset_AI_START_STOP_Select;
+		break;
+	case AI_Trigger_Select_Register:
+		offset = M_Offset_AI_Trigger_Select;
+		break;
+	case Analog_Trigger_Etc_Register:
+		offset = M_Offset_Analog_Trigger_Etc;
+		break;
+	case AO_Command_1_Register:
+		offset = M_Offset_AO_Command_1;
+		break;
+	case AO_Command_2_Register:
+		offset = M_Offset_AO_Command_2;
+		break;
+	case AO_Mode_1_Register:
+		offset = M_Offset_AO_Mode_1;
+		break;
+	case AO_Mode_2_Register:
+		offset = M_Offset_AO_Mode_2;
+		break;
+	case AO_Mode_3_Register:
+		offset = M_Offset_AO_Mode_3;
+		break;
+	case AO_Output_Control_Register:
+		offset = M_Offset_AO_Output_Control;
+		break;
+	case AO_Personal_Register:
+		offset = M_Offset_AO_Personal;
+		break;
+	case AO_Start_Select_Register:
+		offset = M_Offset_AO_Start_Select;
+		break;
+	case AO_Trigger_Select_Register:
+		offset = M_Offset_AO_Trigger_Select;
+		break;
+	case Clock_and_FOUT_Register:
+		offset = M_Offset_Clock_and_FOUT;
+		break;
+	case Configuration_Memory_Clear:
+		offset = M_Offset_Configuration_Memory_Clear;
+		break;
+	case DAC_FIFO_Clear:
+		offset = M_Offset_AO_FIFO_Clear;
+		break;
+	case DIO_Control_Register:
+		rtdm_printk("%s: FIXME: register 0x%x does not map cleanly on to m-series boards.\n", __FUNCTION__, reg);
+		return;
+		break;
+	case G_Autoincrement_Register(0):
+		offset = M_Offset_G0_Autoincrement;
+		break;
+	case G_Autoincrement_Register(1):
+		offset = M_Offset_G1_Autoincrement;
+		break;
+	case G_Command_Register(0):
+		offset = M_Offset_G0_Command;
+		break;
+	case G_Command_Register(1):
+		offset = M_Offset_G1_Command;
+		break;
+	case G_Input_Select_Register(0):
+		offset = M_Offset_G0_Input_Select;
+		break;
+	case G_Input_Select_Register(1):
+		offset = M_Offset_G1_Input_Select;
+		break;
+	case G_Mode_Register(0):
+		offset = M_Offset_G0_Mode;
+		break;
+	case G_Mode_Register(1):
+		offset = M_Offset_G1_Mode;
+		break;
+	case Interrupt_A_Ack_Register:
+		offset = M_Offset_Interrupt_A_Ack;
+		break;
+	case Interrupt_A_Enable_Register:
+		offset = M_Offset_Interrupt_A_Enable;
+		break;
+	case Interrupt_B_Ack_Register:
+		offset = M_Offset_Interrupt_B_Ack;
+		break;
+	case Interrupt_B_Enable_Register:
+		offset = M_Offset_Interrupt_B_Enable;
+		break;
+	case Interrupt_Control_Register:
+		offset = M_Offset_Interrupt_Control;
+		break;
+	case IO_Bidirection_Pin_Register:
+		offset = M_Offset_IO_Bidirection_Pin;
+		break;
+	case Joint_Reset_Register:
+		offset = M_Offset_Joint_Reset;
+		break;
+	case RTSI_Trig_A_Output_Register:
+		offset = M_Offset_RTSI_Trig_A_Output;
+		break;
+	case RTSI_Trig_B_Output_Register:
+		offset = M_Offset_RTSI_Trig_B_Output;
+		break;
+	case RTSI_Trig_Direction_Register:
+		offset = M_Offset_RTSI_Trig_Direction;
+		break;
+		/* FIXME: DIO_Output_Register (16 bit reg) is replaced
+		by M_Offset_Static_Digital_Output (32 bit) and
+		M_Offset_SCXI_Serial_Data_Out (8 bit) */
+	default:
+		rtdm_printk("%s: bug! unhandled register=0x%x in switch.\n",
+			    __FUNCTION__, reg);
+		BUG();
+		return;
+	}
+	ni_writew(data, offset);
+}
+
+static uint16_t m_series_stc_readw(struct a4l_device *dev, int reg)
+{
+	unsigned offset;
+	switch(reg)
+	{
+	case AI_Status_1_Register:
+		offset = M_Offset_AI_Status_1;
+		break;
+	case AO_Status_1_Register:
+		offset = M_Offset_AO_Status_1;
+		break;
+	case AO_Status_2_Register:
+		offset = M_Offset_AO_Status_2;
+		break;
+	case DIO_Serial_Input_Register:
+		return ni_readb(M_Offset_SCXI_Serial_Data_In);
+		break;
+	case Joint_Status_1_Register:
+		offset = M_Offset_Joint_Status_1;
+		break;
+	case Joint_Status_2_Register:
+		offset = M_Offset_Joint_Status_2;
+		break;
+	case G_Status_Register:
+		offset = M_Offset_G01_Status;
+		break;
+	default:
+		rtdm_printk("%s: bug! "
+			    "unhandled register=0x%x in switch.\n",
+			    __FUNCTION__, reg);
+		BUG();
+		return 0;
+		break;
+	}
+	return ni_readw(offset);
+}
+
+static void m_series_stc_writel(struct a4l_device *dev, uint32_t data, int reg)
+{
+	unsigned offset;
+
+	switch(reg)
+	{
+	case AI_SC_Load_A_Registers:
+		offset = M_Offset_AI_SC_Load_A;
+		break;
+	case AI_SI_Load_A_Registers:
+		offset = M_Offset_AI_SI_Load_A;
+		break;
+	case AO_BC_Load_A_Register:
+		offset = M_Offset_AO_BC_Load_A;
+		break;
+	case AO_UC_Load_A_Register:
+		offset = M_Offset_AO_UC_Load_A;
+		break;
+	case AO_UI_Load_A_Register:
+		offset = M_Offset_AO_UI_Load_A;
+		break;
+	case G_Load_A_Register(0):
+		offset = M_Offset_G0_Load_A;
+		break;
+	case G_Load_A_Register(1):
+		offset = M_Offset_G1_Load_A;
+		break;
+	case G_Load_B_Register(0):
+		offset = M_Offset_G0_Load_B;
+		break;
+	case G_Load_B_Register(1):
+		offset = M_Offset_G1_Load_B;
+		break;
+	default:
+		rtdm_printk("%s: bug! unhandled register=0x%x in switch.\n",
+			    __FUNCTION__, reg);
+		BUG();
+		return;
+	}
+	ni_writel(data, offset);
+}
+
+static uint32_t m_series_stc_readl(struct a4l_device *dev, int reg)
+{
+	unsigned offset;
+	switch(reg)
+	{
+	case G_HW_Save_Register(0):
+		offset = M_Offset_G0_HW_Save;
+		break;
+	case G_HW_Save_Register(1):
+		offset = M_Offset_G1_HW_Save;
+		break;
+	case G_Save_Register(0):
+		offset = M_Offset_G0_Save;
+		break;
+	case G_Save_Register(1):
+		offset = M_Offset_G1_Save;
+		break;
+	default:
+		rtdm_printk("%s: bug! unhandled register=0x%x in switch.\n",
+			    __FUNCTION__, reg);
+		BUG();
+		return 0;
+	}
+	return ni_readl(offset);
+}
+
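+/*
+ * E series access to 32 bit STC registers: the value is split into two
+ * consecutive 16 bit window transfers, high word at <reg>, low word at
+ * <reg + 1>.
+ */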
+static void win_out2(struct a4l_device *dev, uint32_t data, int reg)
+{
+	devpriv->stc_writew(dev, data >> 16, reg);
+	devpriv->stc_writew(dev, data & 0xffff, reg + 1);
+}
+
+static uint32_t win_in2(struct a4l_device *dev, int reg)
+{
+	uint32_t bits;
+	bits = devpriv->stc_readw(dev, reg) << 16;
+	bits |= devpriv->stc_readw(dev, reg + 1);
+	return bits;
+}
+
+static void m_series_init_eeprom_buffer(struct a4l_device *dev)
+{
+	static const int Start_Cal_EEPROM = 0x400;
+	static const unsigned window_size = 10;
+	unsigned old_iodwbsr_bits;
+	unsigned old_iodwbsr1_bits;
+	unsigned old_iodwcr1_bits;
+	int i;
+
+	old_iodwbsr_bits = readl(devpriv->mite->mite_io_addr + MITE_IODWBSR);
+	old_iodwbsr1_bits = readl(devpriv->mite->mite_io_addr + MITE_IODWBSR_1);
+	old_iodwcr1_bits = readl(devpriv->mite->mite_io_addr + MITE_IODWCR_1);
+	writel(0x0, devpriv->mite->mite_io_addr + MITE_IODWBSR);
+	writel(((0x80 | window_size) | devpriv->mite->daq_phys_addr),
+	       devpriv->mite->mite_io_addr + MITE_IODWBSR_1);
+	writel(0x0, devpriv->mite->mite_io_addr + MITE_IODWCR_1);
+	writel(0xf, devpriv->mite->mite_io_addr + 0x30);
+
+	for(i = 0; i < M_SERIES_EEPROM_SIZE; ++i)
+	{
+		devpriv->eeprom_buffer[i] = ni_readb(Start_Cal_EEPROM + i);
+	}
+
+	writel(old_iodwbsr1_bits, devpriv->mite->mite_io_addr + MITE_IODWBSR_1);
+	writel(old_iodwbsr_bits, devpriv->mite->mite_io_addr + MITE_IODWBSR);
+	writel(old_iodwcr1_bits, devpriv->mite->mite_io_addr + MITE_IODWCR_1);
+	writel(0x0, devpriv->mite->mite_io_addr + 0x30);
+}
+
+static void init_6143(struct a4l_device *dev)
+{
+	/* Disable interrupts */
+	devpriv->stc_writew(dev, 0, Interrupt_Control_Register);
+
+	/* Initialise 6143 AI specific bits */
+
+	/* Set G0,G1 DMA mode to E series version */
+	ni_writeb(0x00, Magic_6143);
+	/* Set EOCMode, ADCMode and pipelinedelay */
+	ni_writeb(0x80, PipelineDelay_6143);
+	/* Set EOC Delay */
+	ni_writeb(0x00, EOC_Set_6143);
+
+	/* Set the FIFO half full level */
+	ni_writel(boardtype.ai_fifo_depth / 2, AIFIFO_Flag_6143);
+
+	/* Strobe Relay disable bit */
+	devpriv->ai_calib_source_enabled = 0;
+	ni_writew(devpriv->ai_calib_source | Calibration_Channel_6143_RelayOff,
+		  Calibration_Channel_6143);
+	ni_writew(devpriv->ai_calib_source, Calibration_Channel_6143);
+}
+
+static int pcimio_attach(struct a4l_device *dev, a4l_lnkdesc_t *arg)
+{
+	int ret, bus, slot, i, irq;
+	struct mite_struct *mite = NULL;
+	struct ni_board_struct *board = NULL;
+
+	if(arg->opts == NULL || arg->opts_size == 0)
+		bus = slot = 0;
+	else {
+		bus = arg->opts_size >= sizeof(unsigned long) ?
+			((unsigned long *)arg->opts)[0] : 0;
+		slot = arg->opts_size >= sizeof(unsigned long) * 2 ?
+			((unsigned long *)arg->opts)[1] : 0;
+	}
+
+	for(i = 0; i < n_pcimio_boards && mite == NULL; i++) {
+		mite = a4l_mite_find_device(bus, slot, ni_boards[i].device_id);
+		board = &ni_boards[i];
+	}
+
+	if(mite == NULL)
+		return -ENOENT;
+
+	devpriv->irq_polarity = PCIMIO_IRQ_POLARITY;
+	devpriv->irq_pin = 0;
+
+	devpriv->mite = mite;
+	devpriv->board_ptr = board;
+
+	devpriv->ai_mite_ring = mite_alloc_ring(mite);
+	devpriv->ao_mite_ring = mite_alloc_ring(mite);
+	devpriv->cdo_mite_ring = mite_alloc_ring(mite);
+	devpriv->gpct_mite_ring[0] = mite_alloc_ring(mite);
+	devpriv->gpct_mite_ring[1] = mite_alloc_ring(mite);
+
+	if(devpriv->ai_mite_ring == NULL ||
+	   devpriv->ao_mite_ring == NULL ||
+	   devpriv->cdo_mite_ring == NULL ||
+	   devpriv->gpct_mite_ring[0] == NULL ||
+	   devpriv->gpct_mite_ring[1] == NULL)
+		return -ENOMEM;
+
+	a4l_info(dev, "found %s board\n", boardtype.name);
+
+	if(boardtype.reg_type & ni_reg_m_series_mask)
+	{
+		devpriv->stc_writew = &m_series_stc_writew;
+		devpriv->stc_readw = &m_series_stc_readw;
+		devpriv->stc_writel = &m_series_stc_writel;
+		devpriv->stc_readl = &m_series_stc_readl;
+	}else
+	{
+		devpriv->stc_writew = &e_series_win_out;
+		devpriv->stc_readw = &e_series_win_in;
+		devpriv->stc_writel = &win_out2;
+		devpriv->stc_readl = &win_in2;
+	}
+
+	ret = a4l_mite_setup(devpriv->mite, 0);
+	if(ret < 0)
+	{
+		a4l_err(dev, "pcmio_attach: error setting up mite\n");
+		return ret;
+	}
+
+	if(boardtype.reg_type & ni_reg_m_series_mask)
+		m_series_init_eeprom_buffer(dev);
+	if(boardtype.reg_type == ni_reg_6143)
+		init_6143(dev);
+
+	irq = mite_irq(devpriv->mite);
+
+	if(irq == 0){
+		a4l_warn(dev, "pcimio_attach: unknown irq (bad)\n\n");
+	}else{
+		a4l_info(dev, "found irq %u\n", irq);
+		ret = a4l_request_irq(dev,
+				      irq,
+				      a4l_ni_E_interrupt, RTDM_IRQTYPE_SHARED, dev);
+		if(ret < 0)
+			a4l_err(dev, "pcimio_attach: irq not available\n");
+	}
+
+	ret = a4l_ni_E_init(dev);
+	if(ret < 0)
+		return ret;
+
+	dev->driver->driver_name = devpriv->board_ptr->name;
+
+	return ret;
+}
+
+static int pcimio_detach(struct a4l_device *dev)
+{
+	if(a4l_get_irq(dev)!=A4L_IRQ_UNUSED){
+		a4l_free_irq(dev,a4l_get_irq(dev));
+	}
+
+	if(dev->priv != NULL && devpriv->mite != NULL)
+	{
+		mite_free_ring(devpriv->ai_mite_ring);
+		mite_free_ring(devpriv->ao_mite_ring);
+		mite_free_ring(devpriv->gpct_mite_ring[0]);
+		mite_free_ring(devpriv->gpct_mite_ring[1]);
+		mite_free_ring(devpriv->cdo_mite_ring);
+		a4l_mite_unsetup(devpriv->mite);
+	}
+
+	dev->driver->driver_name = NULL;
+
+	return 0;
+}
+
+static struct a4l_driver pcimio_drv = {
+	.owner = THIS_MODULE,
+	.board_name = "analogy_ni_pcimio",
+	.driver_name = NULL,
+	.attach = pcimio_attach,
+	.detach = pcimio_detach,
+	.privdata_size = sizeof(ni_private),
+};
+
+static int __init pcimio_init(void)
+{
+	return a4l_register_drv(&pcimio_drv);
+}
+
+static void __exit pcimio_cleanup(void)
+{
+	a4l_unregister_drv(&pcimio_drv);
+}
+
+MODULE_DESCRIPTION("Analogy driver for NI PCI-MIO series cards");
+MODULE_LICENSE("GPL");
+
+module_init(pcimio_init);
+module_exit(pcimio_cleanup);
+++ linux-patched/drivers/xenomai/analogy/national_instruments/ni_mio.h	2022-03-21 12:58:31.153871760 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/drivers/xenomai/analogy/national_instruments/ni_670x.c	1970-01-01 01:00:00.000000000 +0100
+/*
+ * Hardware driver for NI Mite PCI interface chip
+ * Copyright (C) 1999 David A. Schleef <ds@schleef.org>
+ *
+ * This code is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published
+ * by the Free Software Foundation; either version 2 of the License,
+ * or (at your option) any later version.
+ *
+ * This code is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Xenomai; if not, write to the Free Software Foundation,
+ * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+#ifndef __ANALOGY_NI_MIO_H__
+#define __ANALOGY_NI_MIO_H__
+
+/* Debug stuff */
+
+#ifdef CONFIG_DEBUG_MIO
+#define MDPRINTK(fmt, args...) rtdm_printk(fmt, ##args)
+#else /* !CONFIG_DEBUG_MIO */
+#define MDPRINTK(fmt, args...)
+#endif /* CONFIG_DEBUG_MIO */
+
+/* Subdevice related defines */
+
+#define AIMODE_NONE		0
+#define AIMODE_HALF_FULL	1
+#define AIMODE_SCAN		2
+#define AIMODE_SAMPLE		3
+
+#define NI_AI_SUBDEV		0
+#define NI_AO_SUBDEV		1
+#define NI_DIO_SUBDEV		2
+#define NI_8255_DIO_SUBDEV	3
+#define NI_UNUSED_SUBDEV	4
+#define NI_CALIBRATION_SUBDEV	5
+#define NI_EEPROM_SUBDEV	6
+#define NI_PFI_DIO_SUBDEV	7
+#define NI_CS5529_CALIBRATION_SUBDEV 8
+#define NI_SERIAL_SUBDEV	9
+#define NI_RTSI_SUBDEV		10
+#define NI_GPCT0_SUBDEV		11
+#define NI_GPCT1_SUBDEV		12
+#define NI_FREQ_OUT_SUBDEV	13
+#define NI_NUM_SUBDEVICES	14
+
+#define NI_GPCT_SUBDEV(x)	((x == 1) ? NI_GPCT1_SUBDEV : NI_GPCT0_SUBDEV)
+
+#define TIMEBASE_1_NS		50
+#define TIMEBASE_2_NS		10000
+
+#define SERIAL_DISABLED		0
+#define SERIAL_600NS		600
+#define SERIAL_1_2US		1200
+#define SERIAL_10US		10000
+
+/* PFI digital filtering options for ni m-series for use with
+   INSN_CONFIG_FILTER. */
+#define NI_PFI_FILTER_OFF	0x0
+#define NI_PFI_FILTER_125ns	0x1
+#define NI_PFI_FILTER_6425ns	0x2
+#define NI_PFI_FILTER_2550us	0x3
+
+/* Signals which can be routed to an NI PFI pin on an m-series board
+   with INSN_CONFIG_SET_ROUTING. These numbers are also returned by
+   INSN_CONFIG_GET_ROUTING on pre-m-series boards, even though their
+   routing cannot be changed. The numbers assigned are not arbitrary,
+   they correspond to the bits required to program the board. */
+#define NI_PFI_OUTPUT_PFI_DEFAULT	0
+#define NI_PFI_OUTPUT_AI_START1		1
+#define NI_PFI_OUTPUT_AI_START2		2
+#define NI_PFI_OUTPUT_AI_CONVERT	3
+#define NI_PFI_OUTPUT_G_SRC1		4
+#define NI_PFI_OUTPUT_G_GATE1		5
+#define NI_PFI_OUTPUT_AO_UPDATE_N	6
+#define NI_PFI_OUTPUT_AO_START1		7
+#define NI_PFI_OUTPUT_AI_START_PULSE	8
+#define NI_PFI_OUTPUT_G_SRC0		9
+#define NI_PFI_OUTPUT_G_GATE0		10
+#define NI_PFI_OUTPUT_EXT_STROBE	11
+#define NI_PFI_OUTPUT_AI_EXT_MUX_CLK	12
+#define NI_PFI_OUTPUT_GOUT0		13
+#define NI_PFI_OUTPUT_GOUT1		14
+#define NI_PFI_OUTPUT_FREQ_OUT		15
+#define NI_PFI_OUTPUT_PFI_DO		16
+#define NI_PFI_OUTPUT_I_ATRIG		17
+#define NI_PFI_OUTPUT_RTSI0		18
+#define NI_PFI_OUTPUT_PXI_STAR_TRIGGER_IN 26
+#define NI_PFI_OUTPUT_SCXI_TRIG1	27
+#define NI_PFI_OUTPUT_DIO_CHANGE_DETECT_RTSI 28
+#define NI_PFI_OUTPUT_CDI_SAMPLE	29
+#define NI_PFI_OUTPUT_CDO_UPDATE	30
+
+static inline unsigned int NI_PFI_OUTPUT_RTSI(unsigned rtsi_channel) {
+	return NI_PFI_OUTPUT_RTSI0 + rtsi_channel;
+}
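+/* For instance, NI_PFI_OUTPUT_RTSI(3) evaluates to 18 + 3 = 21, i.e. the
+   routing code for RTSI line 3. */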
+
+/* Ranges declarations */
+
+extern struct a4l_rngdesc a4l_range_ni_E_ai;
+extern struct a4l_rngdesc a4l_range_ni_E_ai_limited;
+extern struct a4l_rngdesc a4l_range_ni_E_ai_limited14;
+extern struct a4l_rngdesc a4l_range_ni_E_ai_bipolar4;
+extern struct a4l_rngdesc a4l_range_ni_E_ai_611x;
+extern struct a4l_rngdesc range_ni_E_ai_622x;
+extern struct a4l_rngdesc range_ni_E_ai_628x;
+extern struct a4l_rngdesc a4l_range_ni_S_ai_6143;
+extern struct a4l_rngdesc a4l_range_ni_E_ao_ext;
+
+/* Misc functions declarations */
+
+int a4l_ni_E_interrupt(unsigned int irq, void *d);
+int a4l_ni_E_init(struct a4l_device *dev);
+
+
+#endif /* !__ANALOGY_NI_MIO_H__ */
+++ linux-patched/drivers/xenomai/analogy/national_instruments/ni_670x.c	2022-03-21 12:58:31.145871838 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/drivers/xenomai/analogy/national_instruments/Kconfig	1970-01-01 01:00:00.000000000 +0100
+/*
+    comedi/drivers/ni_670x.c
+    Hardware driver for NI 670x devices
+
+    COMEDI - Linux Control and Measurement Device Interface
+    Copyright (C) 1997-2001 David A. Schleef <ds@schleef.org>
+
+    This program is free software; you can redistribute it and/or modify
+    it under the terms of the GNU General Public License as published by
+    the Free Software Foundation; either version 2 of the License, or
+    (at your option) any later version.
+
+    This program is distributed in the hope that it will be useful,
+    but WITHOUT ANY WARRANTY; without even the implied warranty of
+    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+    GNU General Public License for more details.
+
+    You should have received a copy of the GNU General Public License
+    along with this program; if not, write to the Free Software
+    Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+
+*/
+/*
+Driver: ni_670x
+Description: National Instruments 670x
+Author: Bart Joris <bjoris@advalvas.be>
+Updated: Wed, 11 Dec 2002 18:25:35 -0800
+Devices: [National Instruments] PCI-6703 (ni_670x), PCI-6704
+Status: unknown
+
+Commands are not supported.
+*/
+
+/*
+	Bart Joris <bjoris@advalvas.be> Last updated on 20/08/2001
+
+	Manuals:
+
+	322110a.pdf	PCI/PXI-6704 User Manual
+	322110b.pdf	PCI/PXI-6703/6704 User Manual
+*/
+
+/*
+ * Integration with Xenomai/Analogy layer based on the
+ * comedi driver. Adaptation made by
+ *   Julien Delange <julien.delange@esa.int>
+ */
+
+#include <linux/interrupt.h>
+#include <linux/slab.h>
+#include <rtdm/analogy/device.h>
+
+#include "../intel/8255.h"
+#include "ni_mio.h"
+#include "mite.h"
+
+#define PCIMIO_IRQ_POLARITY 1
+
+#define  AO_VALUE_OFFSET         0x00
+#define  AO_CHAN_OFFSET          0x0c
+#define  AO_STATUS_OFFSET        0x10
+#define  AO_CONTROL_OFFSET       0x10
+#define  DIO_PORT0_DIR_OFFSET    0x20
+#define  DIO_PORT0_DATA_OFFSET   0x24
+#define  DIO_PORT1_DIR_OFFSET    0x28
+#define  DIO_PORT1_DATA_OFFSET   0x2c
+#define  MISC_STATUS_OFFSET      0x14
+#define  MISC_CONTROL_OFFSET     0x14
+
+/* Board description*/
+
+struct ni_670x_board {
+	unsigned short device_id;
+	const char *name;
+	unsigned short ao_chans;
+	unsigned short ao_bits;
+};
+
+#define thisboard ((struct ni_670x_board *)dev->board_ptr)
+
+struct ni_670x_private {
+	struct mite_struct *mite;
+	int boardtype;
+	int dio;
+	unsigned int ao_readback[32];
+
+	/*
+	 * Added when porting to xenomai
+	 */
+	int irq_polarity;
+	int irq_pin;
+	int irq;
+	struct ni_670x_board *board_ptr;
+	/*
+	 * END OF ADDED when porting to xenomai
+	 */
+};
+
+struct ni_670x_subd_priv {
+	int io_bits;
+	unsigned int state;
+	uint16_t readback[2];
+	uint16_t config;
+	void* counter;
+};
+
+static int ni_670x_ao_winsn(struct a4l_subdevice *subd, struct a4l_kernel_instruction *insn);
+static int ni_670x_ao_rinsn(struct a4l_subdevice *subd, struct a4l_kernel_instruction *insn);
+static int ni_670x_dio_insn_bits(struct a4l_subdevice *subd, struct a4l_kernel_instruction *insn);
+static int ni_670x_dio_insn_config(struct a4l_subdevice *subd, struct a4l_kernel_instruction *insn);
+
+static struct a4l_channels_desc ni_670x_desc_dio = {
+	.mode = A4L_CHAN_GLOBAL_CHANDESC,
+	.length = 8,
+	.chans = {
+		{A4L_CHAN_AREF_GROUND, 1},
+	},
+};
+
+static struct a4l_channels_desc ni_670x_desc_ao = {
+	.mode = A4L_CHAN_GLOBAL_CHANDESC,
+	.length = 0, /* initialized later according to the board found */
+	.chans = {
+		{A4L_CHAN_AREF_GROUND, 16},
+	},
+};
+
+
+static struct a4l_rngtab range_0_20mA = { 1, {RANGE_mA(0, 20)} };
+static struct a4l_rngtab rng_bipolar10 = { 1, {RANGE_V(-10, 10) }};
+
+struct a4l_rngtab *range_table_list[32] = {
+	&rng_bipolar10, &rng_bipolar10, &rng_bipolar10, &rng_bipolar10,
+	&rng_bipolar10, &rng_bipolar10, &rng_bipolar10, &rng_bipolar10,
+	&rng_bipolar10, &rng_bipolar10, &rng_bipolar10, &rng_bipolar10,
+	&rng_bipolar10, &rng_bipolar10, &rng_bipolar10, &rng_bipolar10,
+	&range_0_20mA, &range_0_20mA, &range_0_20mA, &range_0_20mA,
+	&range_0_20mA, &range_0_20mA, &range_0_20mA, &range_0_20mA,
+	&range_0_20mA, &range_0_20mA, &range_0_20mA, &range_0_20mA,
+	&range_0_20mA, &range_0_20mA, &range_0_20mA, &range_0_20mA};
+
+static A4L_RNGDESC(32) ni670x_ao_desc;
+
+static void setup_subd_ao(struct a4l_subdevice *subd)
+{
+	int i;
+	int nchans;
+
+	nchans = ((struct ni_670x_private*)(subd->dev->priv))->board_ptr->ao_chans;
+	subd->flags                = A4L_SUBD_AO;
+	subd->chan_desc            = &ni_670x_desc_ao;
+	subd->chan_desc->length    = nchans;
+	if (nchans == 32) {
+
+		subd->rng_desc = (struct a4l_rngdesc*) &ni670x_ao_desc;
+		subd->rng_desc->mode = A4L_RNG_PERCHAN_RNGDESC;
+		for (i = 0 ; i < 16 ; i++) {
+			subd->rng_desc->rngtabs[i] =&rng_bipolar10;
+			subd->rng_desc->rngtabs[16+i] =&range_0_20mA;
+		}
+	} else
+		subd->rng_desc = &a4l_range_bipolar10;
+
+	subd->insn_write = &ni_670x_ao_winsn;
+	subd->insn_read = &ni_670x_ao_rinsn;
+}
+
+static void setup_subd_dio(struct a4l_subdevice *s)
+{
+	/* Digital i/o subdevice */
+	s->flags = A4L_SUBD_DIO;
+	s->chan_desc = &ni_670x_desc_dio;
+	s->rng_desc = &range_digital;
+	s->insn_bits = ni_670x_dio_insn_bits;
+	s->insn_config = ni_670x_dio_insn_config;
+}
+
+struct setup_subd {
+	void (*setup_func) (struct a4l_subdevice *);
+	int sizeof_priv;
+};
+
+static struct setup_subd setup_subds[2] = {
+	{
+		.setup_func = setup_subd_ao,
+		.sizeof_priv = sizeof(struct ni_670x_subd_priv),
+	},
+	{
+		.setup_func = setup_subd_dio,
+		.sizeof_priv = sizeof(struct ni_670x_subd_priv),
+	},
+};
+
+static const struct ni_670x_board ni_670x_boards[] = {
+	{
+		.device_id = 0x2c90,
+		.name = "PCI-6703",
+		.ao_chans = 16,
+		.ao_bits = 16,
+	},
+	{
+		.device_id = 0x1920,
+		.name = "PXI-6704",
+		.ao_chans = 32,
+		.ao_bits = 16,
+	},
+	{
+		.device_id = 0x1290,
+		.name = "PCI-6704",
+		.ao_chans = 32,
+		.ao_bits = 16,
+	 },
+};
+
+#define n_ni_670x_boards ((sizeof(ni_670x_boards)/sizeof(ni_670x_boards[0])))
+
+static const struct pci_device_id ni_670x_pci_table[] = {
+	{PCI_DEVICE(PCI_VENDOR_ID_NI, 0x2c90)},
+	{PCI_DEVICE(PCI_VENDOR_ID_NI, 0x1920)},
+	{0}
+};
+
+MODULE_DEVICE_TABLE(pci, ni_670x_pci_table);
+
+#define devpriv ((struct ni_670x_private *)dev->priv)
+
+static inline struct ni_670x_private *private(struct a4l_device *dev)
+{
+	return (struct ni_670x_private*) dev->priv;
+}
+
+
+static int ni_670x_attach (struct a4l_device *dev, a4l_lnkdesc_t *arg);
+static int ni_670x_detach(struct a4l_device *dev);
+
+static struct a4l_driver ni_670x_drv = {
+	.owner = THIS_MODULE,
+	.board_name = "analogy_ni_670x",
+	.driver_name = "ni_670x",
+	.attach = ni_670x_attach,
+	.detach = ni_670x_detach,
+	.privdata_size = sizeof(struct ni_670x_private),
+};
+
+static int __init driver_ni_670x_init_module(void)
+{
+	return a4l_register_drv (&ni_670x_drv);
+}
+
+static void __exit driver_ni_670x_cleanup_module(void)
+{
+	a4l_unregister_drv (&ni_670x_drv);
+}
+
+module_init(driver_ni_670x_init_module);
+module_exit(driver_ni_670x_cleanup_module);
+
+static int ni_670x_attach (struct a4l_device *dev, a4l_lnkdesc_t *arg)
+{
+	int ret, bus, slot, i, irq;
+	struct mite_struct *mite;
+	struct ni_670x_board* board = NULL;
+	int err;
+
+	if(arg->opts == NULL || arg->opts_size == 0)
+		bus = slot = 0;
+	else {
+		bus = arg->opts_size >= sizeof(unsigned long) ?
+			((unsigned long *)arg->opts)[0] : 0;
+		slot = arg->opts_size >= sizeof(unsigned long) * 2 ?
+			((unsigned long *)arg->opts)[1] : 0;
+	}
+
+	a4l_info(dev, "ni670x attach procedure started(bus=%d/slot=%d)...\n",
+		 bus, slot);
+
+	mite = NULL;
+
+	for(i = 0; i <  n_ni_670x_boards && mite == NULL; i++) {
+		mite = a4l_mite_find_device(bus,
+					    slot, ni_670x_boards[i].device_id);
+		board = (struct ni_670x_board*) &ni_670x_boards[i];
+	}
+
+	if(mite == NULL) {
+		a4l_err(dev, "%s: cannot find the MITE device\n", __FUNCTION__);
+		return -ENOENT;
+	}
+
+	a4l_info(dev, "Found device %d %s\n", i, ni_670x_boards[i].name);
+
+	devpriv->irq_polarity = PCIMIO_IRQ_POLARITY;
+	devpriv->irq_pin = 0;
+
+	devpriv->mite = mite;
+	devpriv->board_ptr = board;
+
+	ret = a4l_mite_setup(devpriv->mite, 0);
+	if (ret < 0) {
+		a4l_err(dev, "%s: error setting up mite\n", __FUNCTION__);
+		return ret;
+	}
+
+	irq = mite_irq(devpriv->mite);
+	devpriv->irq = irq;
+
+	a4l_info(dev, "found %s board\n", board->name);
+
+	for (i = 0; i < 2; i++) {
+		struct a4l_subdevice *subd =
+			a4l_alloc_subd(setup_subds[i].sizeof_priv, NULL);
+
+		if (subd == NULL) {
+			a4l_err(dev,
+				"%s: cannot allocate subdevice\n",
+				__FUNCTION__);
+			return -ENOMEM;
+		}
+
+		err = a4l_add_subd(dev, subd);
+		if (err != i) {
+			a4l_err(dev,
+				"%s: cannot add subdevice\n",
+				__FUNCTION__);
+			return err;
+		}
+
+		setup_subds[i].setup_func (subd);
+	}
+
+	/* Config of misc registers */
+	writel(0x10, devpriv->mite->daq_io_addr + MISC_CONTROL_OFFSET);
+	/* Config of ao registers */
+	writel(0x00, devpriv->mite->daq_io_addr + AO_CONTROL_OFFSET);
+
+	a4l_info(dev, "ni670x attached\n");
+
+	return 0;
+}
+
+static int ni_670x_detach(struct a4l_device *dev)
+{
+	a4l_info(dev, "ni670x detach procedure started...\n");
+
+	if(dev->priv != NULL && devpriv->mite != NULL)
+		a4l_mite_unsetup(devpriv->mite);
+
+	a4l_info(dev, "ni670x detach procedure succeeded...\n");
+
+	return 0;
+}
+
+
+static int ni_670x_dio_insn_config(struct a4l_subdevice *subd, struct a4l_kernel_instruction *insn)
+{
+	struct a4l_device *dev = subd->dev;
+	unsigned int *data = (unsigned int *)insn->data;
+	int chan = CR_CHAN(insn->chan_desc);
+	struct ni_670x_subd_priv *subdpriv =
+		(struct ni_670x_subd_priv *)subd->priv;
+
+	switch (data[0]) {
+	case A4L_INSN_CONFIG_DIO_OUTPUT:
+		subdpriv->io_bits |= 1 << chan;
+		break;
+	case A4L_INSN_CONFIG_DIO_INPUT:
+		subdpriv->io_bits &= ~(1 << chan);
+		break;
+	case A4L_INSN_CONFIG_DIO_QUERY:
+		data[1] = (subdpriv->io_bits & (1 << chan)) ?
+			A4L_OUTPUT : A4L_INPUT;
+		return 0;
+		break;
+	default:
+		return -EINVAL;
+		break;
+	}
+
+	writel(subdpriv->io_bits,
+	       devpriv->mite->daq_io_addr + DIO_PORT0_DIR_OFFSET);
+
+	return 0;
+}
+
+static int ni_670x_ao_winsn(struct a4l_subdevice *subd, struct a4l_kernel_instruction *insn)
+{
+	int i;
+	unsigned int tmp;
+	unsigned int* dtmp;
+	int chan;
+	dtmp = (unsigned int*)insn->data;
+	chan = CR_CHAN(insn->chan_desc);
+
+	/* Channel number mapping:
+	 *
+	 *   NI 6703 / NI 6704            | NI 6704 only
+	 *   -----------------------------+-----------------------------
+	 *   vch(0)  -> 0                 | ich(16) -> 1
+	 *   vch(1)  -> 2                 | ich(17) -> 3
+	 *    ...    -> ...               |  ...     -> ...
+	 *   vch(15) -> 30                | ich(31) -> 31
+	 */
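+	/*
+	 * The channel register write below encodes this mapping:
+	 * (chan & 15) << 1 picks the channel pair and (chan & 16) >> 4
+	 * selects voltage (0) vs. current (1) output, so for example
+	 * chan 1 (vch(1)) yields 2 and chan 17 (ich(17)) yields 3.
+	 */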
+
+	for (i = 0; i < insn->data_size / sizeof(unsigned int); i++) {
+
+		tmp = dtmp[i];
+
+		/* First write in channel register which channel to use */
+		writel(((chan & 15) << 1) | ((chan & 16) >> 4),
+		       private (subd->dev)->mite->daq_io_addr + AO_CHAN_OFFSET);
+
+		/* write channel value */
+		writel(dtmp[i],
+		       private(subd->dev)->mite->daq_io_addr + AO_VALUE_OFFSET);
+		private(subd->dev)->ao_readback[chan] = tmp;
+	}
+
+	return 0;
+}
+
+static int ni_670x_ao_rinsn(struct a4l_subdevice *subd, struct a4l_kernel_instruction *insn)
+{
+	int i;
+	unsigned int* dtmp;
+	int chan = CR_CHAN(insn->chan_desc);
+
+	dtmp = (unsigned int*)insn->data;
+
+	for (i = 0; i < insn->data_size / sizeof(unsigned int); i++)
+		dtmp[i] = private(subd->dev)->ao_readback[chan];
+
+	return 0;
+}
+
+
+static int ni_670x_dio_insn_bits(struct a4l_subdevice *subd, struct a4l_kernel_instruction *insn)
+{
+	return -ENOSYS;
+}
+
+MODULE_DESCRIPTION("Analogy driver for NI670x series cards");
+MODULE_LICENSE("GPL");
+++ linux-patched/drivers/xenomai/analogy/national_instruments/Kconfig	2022-03-21 12:58:31.138871906 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/drivers/xenomai/analogy/national_instruments/mio_common.c	1970-01-01 01:00:00.000000000 +0100
+
+config XENO_DRIVERS_ANALOGY_NI_MITE
+	depends on XENO_DRIVERS_ANALOGY && PCI
+	tristate "NI MITE driver"
+	default n
+
+config XENO_DRIVERS_ANALOGY_NI_TIO
+	depends on XENO_DRIVERS_ANALOGY
+	tristate "NI TIO driver"
+	default n
+
+config XENO_DRIVERS_ANALOGY_NI_MIO       
+	depends on XENO_DRIVERS_ANALOGY && XENO_DRIVERS_ANALOGY_NI_TIO && PCI
+	tristate "NI MIO driver"
+	default n
+
+config XENO_DRIVERS_ANALOGY_NI_PCIMIO       
+	depends on XENO_DRIVERS_ANALOGY && PCI
+	select XENO_DRIVERS_ANALOGY_NI_MITE
+	select XENO_DRIVERS_ANALOGY_NI_TIO
+	select XENO_DRIVERS_ANALOGY_NI_MIO
+	select XENO_DRIVERS_ANALOGY_8255
+	tristate "NI PCIMIO driver"
+	default n
+
+config XENO_DRIVERS_ANALOGY_NI_670x       
+	depends on EXPERIMENTAL && XENO_DRIVERS_ANALOGY && PCI
+	select XENO_DRIVERS_ANALOGY_NI_MITE
+	select XENO_DRIVERS_ANALOGY_NI_TIO
+	select XENO_DRIVERS_ANALOGY_NI_MIO
+	select XENO_DRIVERS_ANALOGY_8255
+	tristate "NI 670X driver (EXPERIMENTAL)"
+	default n
+
+config XENO_DRIVERS_ANALOGY_NI_660x       
+	depends on EXPERIMENTAL && XENO_DRIVERS_ANALOGY && PCI
+	select XENO_DRIVERS_ANALOGY_NI_MITE
+	select XENO_DRIVERS_ANALOGY_NI_TIO
+	select XENO_DRIVERS_ANALOGY_NI_MIO
+	select XENO_DRIVERS_ANALOGY_8255
+	tristate "NI 660X driver (EXPERIMENTAL)"
+	default n
+++ linux-patched/drivers/xenomai/analogy/national_instruments/mio_common.c	2022-03-21 12:58:31.130871984 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/drivers/xenomai/analogy/national_instruments/mite.c	1970-01-01 01:00:00.000000000 +0100
+/*
+ * Hardware driver for DAQ-STC based boards
+ *
+ * Copyright (C) 1997-2001 David A. Schleef <ds@schleef.org>
+ * Copyright (C) 2002-2006 Frank Mori Hess <fmhess@users.sourceforge.net>
+ *
+ * This code is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published
+ * by the Free Software Foundation; either version 2 of the License,
+ * or (at your option) any later version.
+ *
+ * This code is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Xenomai; if not, write to the Free Software Foundation,
+ * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ * Description: DAQ-STC systems
+ *
+ * References:
+ * 340747b.pdf  AT-MIO E series Register-Level Programmer Manual
+ * 341079b.pdf  PCI E Series Register-Level Programmer Manual
+ * 340934b.pdf  DAQ-STC reference manual
+ * 322080b.pdf  6711/6713/6715 User Manual
+ * 320945c.pdf  PCI E Series User Manual
+ * 322138a.pdf  PCI-6052E and DAQPad-6052E User Manual
+ * 320517c.pdf  AT E Series User manual (obsolete)
+ * 320517f.pdf  AT E Series User manual
+ * 320906c.pdf  Maximum signal ratings
+ * 321066a.pdf  About 16x
+ * 321791a.pdf  Discontinuation of at-mio-16e-10 rev. c
+ * 321808a.pdf  About at-mio-16e-10 rev P
+ * 321837a.pdf  Discontinuation of at-mio-16de-10 rev d
+ * 321838a.pdf  About at-mio-16de-10 rev N
+ *
+ * ISSUES:
+ * - The interrupt routine needs to be cleaned up
+ * - S-Series PCI-6143 support has been added but is not fully tested
+ *   as yet. Terry Barnaby, BEAM Ltd.
+ *
+ */
+#include <linux/module.h>
+#include <linux/slab.h>
+#include "../intel/8255.h"
+#include "mite.h"
+#include "ni_stc.h"
+#include "ni_mio.h"
+
+#define NI_TIMEOUT 1000
+
+/* Note: this table must match the ai_gain_* definitions */
+static const short ni_gainlkup[][16] = {
+	/* ai_gain_16 */
+	{0, 1, 2, 3, 4, 5, 6, 7, 0x100, 0x101, 0x102, 0x103, 0x104, 0x105,
+	 0x106, 0x107},
+	/* ai_gain_8 */
+	{1, 2, 4, 7, 0x101, 0x102, 0x104, 0x107},
+	/* ai_gain_14 */
+	{1, 2, 3, 4, 5, 6, 7, 0x101, 0x102, 0x103, 0x104, 0x105, 0x106,
+	 0x107},
+	/* ai_gain_4 */
+	{0, 1, 4, 7},
+	/* ai_gain_611x */
+	{0x00a, 0x00b, 0x001, 0x002, 0x003, 0x004, 0x005, 0x006},
+	/* ai_gain_622x */
+	{0, 1, 4, 5},
+	/* ai_gain_628x */
+	{1, 2, 3, 4, 5, 6, 7},
+	/* ai_gain_6143 */
+	{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00},
+};
+
+struct a4l_rngtab rng_ni_E_ai = {16, {
+	RANGE_V(-10, 10),
+	RANGE_V(-5, 5),
+	RANGE_V(-2.5, 2.5),
+	RANGE_V(-1, 1),
+	RANGE_V(-0.5, 0.5),
+	RANGE_V(-0.25, 0.25),
+	RANGE_V(-0.1, 0.1),
+	RANGE_V(-0.05, 0.05),
+	RANGE_V(0, 20),
+	RANGE_V(0, 10),
+	RANGE_V(0, 5),
+	RANGE_V(0, 2),
+	RANGE_V(0, 1),
+	RANGE_V(0, 0.5),
+	RANGE_V(0, 0.2),
+	RANGE_V(0, 0.1),
+}};
+struct a4l_rngdesc a4l_range_ni_E_ai =
+	RNG_GLOBAL(rng_ni_E_ai);
+
+struct a4l_rngtab rng_ni_E_ai_limited = {8, {
+	RANGE_V(-10, 10),
+	RANGE_V(-5, 5),
+	RANGE_V(-1, 1),
+	RANGE_V(-0.1, 0.1),
+	RANGE_V(0, 10),
+	RANGE_V(0, 5),
+	RANGE_V(0, 1),
+	RANGE_V(0, 0.1),
+}};
+struct a4l_rngdesc a4l_range_ni_E_ai_limited =
+	RNG_GLOBAL(rng_ni_E_ai_limited);
+
+struct a4l_rngtab rng_ni_E_ai_limited14 = {14, {
+	RANGE_V(-10, 10),
+	RANGE_V(-5, 5),
+	RANGE_V(-2, 2),
+	RANGE_V(-1, 1),
+	RANGE_V(-0.5, 0.5),
+	RANGE_V(-0.2, 0.2),
+	RANGE_V(-0.1, 0.1),
+	RANGE_V(0, 10),
+	RANGE_V(0, 5),
+	RANGE_V(0, 2),
+	RANGE_V(0, 1),
+	RANGE_V(0, 0.5),
+	RANGE_V(0, 0.2),
+	RANGE_V(0, 0.1),
+}};
+struct a4l_rngdesc a4l_range_ni_E_ai_limited14 =
+	RNG_GLOBAL(rng_ni_E_ai_limited14);
+
+struct a4l_rngtab rng_ni_E_ai_bipolar4 = {4, {
+	RANGE_V(-10,10),
+	RANGE_V(-5, 5),
+	RANGE_V(-0.5, 0.5),
+	RANGE_V(-0.05, 0.05),
+}};
+struct a4l_rngdesc a4l_range_ni_E_ai_bipolar4 =
+	RNG_GLOBAL(rng_ni_E_ai_bipolar4);
+
+struct a4l_rngtab rng_ni_E_ai_611x = {8, {
+	RANGE_V(-50, 50),
+	RANGE_V(-20, 20),
+	RANGE_V(-10, 10),
+	RANGE_V(-5, 5),
+	RANGE_V(-2, 2),
+	RANGE_V(-1, 1),
+	RANGE_V(-0.5, 0.5),
+	RANGE_V(-0.2, 0.2),
+}};
+struct a4l_rngdesc a4l_range_ni_E_ai_611x =
+	RNG_GLOBAL(rng_ni_E_ai_611x);
+
+struct a4l_rngtab rng_ni_M_ai_622x = {4, {
+	RANGE_V(-10, 10),
+	RANGE_V(-5, 5),
+	RANGE_V(-1, 1),
+	RANGE_V(-0.2, 0.2),
+}};
+struct a4l_rngdesc a4l_range_ni_M_ai_622x =
+	RNG_GLOBAL(rng_ni_M_ai_622x);
+
+struct a4l_rngtab rng_ni_M_ai_628x = {7, {
+	RANGE_V(-10, 10),
+	RANGE_V(-5, 5),
+	RANGE_V(-2, 2),
+	RANGE_V(-1, 1),
+	RANGE_V(-0.5, 0.5),
+	RANGE_V(-0.2, 0.2),
+	RANGE_V(-0.1, 0.1),
+}};
+struct a4l_rngdesc a4l_range_ni_M_ai_628x =
+	RNG_GLOBAL(rng_ni_M_ai_628x);
+
+struct a4l_rngtab rng_ni_S_ai_6143 = {1, {
+	RANGE_V(-5, 5),
+}};
+struct a4l_rngdesc a4l_range_ni_S_ai_6143 =
+	RNG_GLOBAL(rng_ni_S_ai_6143);
+
+
+struct a4l_rngtab rng_ni_E_ao_ext = {4, {
+	RANGE_V(-10, 10),
+	RANGE_V(0, 10),
+	RANGE_ext(-1, 1),
+	RANGE_ext(0, 1),
+}};
+struct a4l_rngdesc a4l_range_ni_E_ao_ext =
+	RNG_GLOBAL(rng_ni_E_ao_ext);
+
+struct a4l_rngdesc *ni_range_lkup[] = {
+	&a4l_range_ni_E_ai,
+	&a4l_range_ni_E_ai_limited,
+	&a4l_range_ni_E_ai_limited14,
+	&a4l_range_ni_E_ai_bipolar4,
+	&a4l_range_ni_E_ai_611x,
+	&a4l_range_ni_M_ai_622x,
+	&a4l_range_ni_M_ai_628x,
+	&a4l_range_ni_S_ai_6143
+};
+
+static const int num_adc_stages_611x = 3;
+
+static void ni_handle_fifo_dregs(struct a4l_subdevice *subd);
+static void get_last_sample_611x(struct a4l_subdevice *subd);
+static void get_last_sample_6143(struct a4l_subdevice *subd);
+static void handle_cdio_interrupt(struct a4l_device *dev);
+static void ni_load_channelgain_list(struct a4l_device *dev,
+				     unsigned int n_chan, unsigned int *list);
+
+#if (!defined(CONFIG_XENO_DRIVERS_ANALOGY_NI_MITE) && \
+     !defined(CONFIG_XENO_DRIVERS_ANALOGY_NI_MITE_MODULE))
+static void ni_handle_fifo_half_full(struct a4l_subdevice *subd);
+static int ni_ao_fifo_half_empty(struct a4l_subdevice *subd);
+#endif /* !CONFIG_XENO_DRIVERS_ANALOGY_NI_MITE */
+
+static inline void ni_set_bitfield(struct a4l_device *dev,
+				   int reg,
+				   unsigned int bit_mask,
+				   unsigned int bit_values)
+{
+	unsigned long flags;
+
+	rtdm_lock_get_irqsave(&devpriv->soft_reg_copy_lock, flags);
+	switch (reg) {
+	case Interrupt_A_Enable_Register:
+		devpriv->int_a_enable_reg &= ~bit_mask;
+		devpriv->int_a_enable_reg |= bit_values & bit_mask;
+		devpriv->stc_writew(dev, devpriv->int_a_enable_reg,
+				    Interrupt_A_Enable_Register);
+		break;
+	case Interrupt_B_Enable_Register:
+		devpriv->int_b_enable_reg &= ~bit_mask;
+		devpriv->int_b_enable_reg |= bit_values & bit_mask;
+		devpriv->stc_writew(dev, devpriv->int_b_enable_reg,
+				    Interrupt_B_Enable_Register);
+		break;
+	case IO_Bidirection_Pin_Register:
+		devpriv->io_bidirection_pin_reg &= ~bit_mask;
+		devpriv->io_bidirection_pin_reg |= bit_values & bit_mask;
+		devpriv->stc_writew(dev, devpriv->io_bidirection_pin_reg,
+				    IO_Bidirection_Pin_Register);
+		break;
+	case AI_AO_Select:
+		devpriv->ai_ao_select_reg &= ~bit_mask;
+		devpriv->ai_ao_select_reg |= bit_values & bit_mask;
+		ni_writeb(devpriv->ai_ao_select_reg, AI_AO_Select);
+		break;
+	case G0_G1_Select:
+		devpriv->g0_g1_select_reg &= ~bit_mask;
+		devpriv->g0_g1_select_reg |= bit_values & bit_mask;
+		ni_writeb(devpriv->g0_g1_select_reg, G0_G1_Select);
+		break;
+	default:
+		a4l_err(dev,
+			"Warning %s() called with invalid register\n",
+			__FUNCTION__);
+		a4l_err(dev,"reg is %d\n", reg);
+		break;
+	}
+
+	mmiowb();
+	rtdm_lock_put_irqrestore(&devpriv->soft_reg_copy_lock, flags);
+}
+
+#if (defined(CONFIG_XENO_DRIVERS_ANALOGY_NI_MITE) || \
+     defined(CONFIG_XENO_DRIVERS_ANALOGY_NI_MITE_MODULE))
+
+static int ni_ai_drain_dma(struct a4l_subdevice *subd);
+
+static inline void ni_set_ai_dma_channel(struct a4l_device * dev, int channel)
+{
+	unsigned bitfield;
+
+	if (channel >= 0) {
+		bitfield =
+			(ni_stc_dma_channel_select_bitfield(channel) <<
+			 AI_DMA_Select_Shift) & AI_DMA_Select_Mask;
+	} else {
+		bitfield = 0;
+	}
+	ni_set_bitfield(dev, AI_AO_Select, AI_DMA_Select_Mask, bitfield);
+}
+
+static inline void ni_set_ao_dma_channel(struct a4l_device * dev, int channel)
+{
+	unsigned bitfield;
+
+	if (channel >= 0) {
+		bitfield =
+			(ni_stc_dma_channel_select_bitfield(channel) <<
+			 AO_DMA_Select_Shift) & AO_DMA_Select_Mask;
+	} else {
+		bitfield = 0;
+	}
+	ni_set_bitfield(dev, AI_AO_Select, AO_DMA_Select_Mask, bitfield);
+}
+
+static inline void ni_set_gpct_dma_channel(struct a4l_device * dev,
+					   unsigned gpct_index, int mite_channel)
+{
+	unsigned bitfield;
+
+	if (mite_channel >= 0) {
+		bitfield = GPCT_DMA_Select_Bits(gpct_index, mite_channel);
+	} else {
+		bitfield = 0;
+	}
+	ni_set_bitfield(dev, G0_G1_Select, GPCT_DMA_Select_Mask(gpct_index),
+			bitfield);
+}
+
+static inline void ni_set_cdo_dma_channel(struct a4l_device * dev, int mite_channel)
+{
+	unsigned long flags;
+
+	rtdm_lock_get_irqsave(&devpriv->soft_reg_copy_lock, flags);
+	devpriv->cdio_dma_select_reg &= ~CDO_DMA_Select_Mask;
+	if (mite_channel >= 0) {
+		/*XXX just guessing
+		  ni_stc_dma_channel_select_bitfield() returns the right
+		  bits, under the assumption the cdio dma selection
+		  works just like ai/ao/gpct. Definitely works for dma
+		  channels 0 and 1. */
+		devpriv->cdio_dma_select_reg |=
+			(ni_stc_dma_channel_select_bitfield(mite_channel) <<
+			 CDO_DMA_Select_Shift) & CDO_DMA_Select_Mask;
+	}
+	ni_writeb(devpriv->cdio_dma_select_reg, M_Offset_CDIO_DMA_Select);
+	mmiowb();
+	rtdm_lock_put_irqrestore(&devpriv->soft_reg_copy_lock, flags);
+}
+
+static int ni_request_ai_mite_channel(struct a4l_device * dev)
+{
+	unsigned long flags;
+
+	rtdm_lock_get_irqsave(&devpriv->mite_channel_lock, flags);
+	BUG_ON(devpriv->ai_mite_chan);
+	devpriv->ai_mite_chan =
+		mite_request_channel(devpriv->mite, devpriv->ai_mite_ring);
+	if (devpriv->ai_mite_chan == NULL) {
+		rtdm_lock_put_irqrestore(&devpriv->mite_channel_lock,
+				      flags);
+		a4l_err(dev,
+			"ni_request_ai_mite_channel: "
+			"failed to reserve mite dma channel for analog input.");
+		return -EBUSY;
+	}
+	devpriv->ai_mite_chan->dir = A4L_INPUT;
+	ni_set_ai_dma_channel(dev, devpriv->ai_mite_chan->channel);
+	rtdm_lock_put_irqrestore(&devpriv->mite_channel_lock, flags);
+	return 0;
+}
+
+static int ni_request_ao_mite_channel(struct a4l_device * dev)
+{
+	unsigned long flags;
+
+	rtdm_lock_get_irqsave(&devpriv->mite_channel_lock, flags);
+	BUG_ON(devpriv->ao_mite_chan);
+	devpriv->ao_mite_chan =
+		mite_request_channel(devpriv->mite, devpriv->ao_mite_ring);
+	if (devpriv->ao_mite_chan == NULL) {
+		rtdm_lock_put_irqrestore(&devpriv->mite_channel_lock,
+				      flags);
+		a4l_err(dev,
+			"ni_request_ao_mite_channel: "
+			"failed to reserve mite dma channel for analog outut.");
+		return -EBUSY;
+	}
+	devpriv->ao_mite_chan->dir = A4L_OUTPUT;
+	ni_set_ao_dma_channel(dev, devpriv->ao_mite_chan->channel);
+	rtdm_lock_put_irqrestore(&devpriv->mite_channel_lock, flags);
+	return 0;
+}
+
+static int ni_request_gpct_mite_channel(struct a4l_device * dev,
+					unsigned gpct_index, int direction)
+{
+	unsigned long flags;
+	struct mite_channel *mite_chan;
+
+	BUG_ON(gpct_index >= NUM_GPCT);
+	rtdm_lock_get_irqsave(&devpriv->mite_channel_lock, flags);
+	BUG_ON(devpriv->counter_dev->counters[gpct_index]->mite_chan);
+	mite_chan = mite_request_channel(devpriv->mite,
+					 devpriv->gpct_mite_ring[gpct_index]);
+	if (mite_chan == NULL) {
+		rtdm_lock_put_irqrestore(&devpriv->mite_channel_lock,
+				      flags);
+		a4l_err(dev,
+			"ni_request_gpct_mite_channel: "
+			"failed to reserve mite dma channel for counter.");
+		return -EBUSY;
+	}
+	mite_chan->dir = direction;
+	a4l_ni_tio_set_mite_channel(devpriv->counter_dev->counters[gpct_index],
+				mite_chan);
+	ni_set_gpct_dma_channel(dev, gpct_index, mite_chan->channel);
+	rtdm_lock_put_irqrestore(&devpriv->mite_channel_lock, flags);
+	return 0;
+}
+
+static int ni_request_cdo_mite_channel(struct a4l_device *dev)
+{
+	unsigned long flags;
+	int err = 0;
+
+	rtdm_lock_get_irqsave(&devpriv->mite_channel_lock, flags);
+
+	/* No channel should be allocated... */
+	BUG_ON(devpriv->cdo_mite_chan);
+	/* ...until now */
+	devpriv->cdo_mite_chan =
+		mite_request_channel(devpriv->mite, devpriv->cdo_mite_ring);
+
+	if (devpriv->cdo_mite_chan) {
+		devpriv->cdo_mite_chan->dir = A4L_OUTPUT;
+		ni_set_cdo_dma_channel(dev, devpriv->cdo_mite_chan->channel);
+	} else {
+		err = -EBUSY;
+		a4l_err(dev,
+			"ni_request_cdo_mite_channel: "
+			"failed to reserve mite dma channel "
+			"for correlated digital outut.");
+	}
+
+	rtdm_lock_put_irqrestore(&devpriv->mite_channel_lock, flags);
+
+	return err;
+}
+
+void ni_release_ai_mite_channel(struct a4l_device *dev)
+{
+	unsigned long flags;
+
+	rtdm_lock_get_irqsave(&devpriv->mite_channel_lock, flags);
+	if (devpriv->ai_mite_chan) {
+		ni_set_ai_dma_channel(dev, -1);
+		a4l_mite_release_channel(devpriv->ai_mite_chan);
+		devpriv->ai_mite_chan = NULL;
+	}
+	rtdm_lock_put_irqrestore(&devpriv->mite_channel_lock, flags);
+
+}
+
+void ni_release_ao_mite_channel(struct a4l_device *dev)
+{
+	unsigned long flags;
+
+	rtdm_lock_get_irqsave(&devpriv->mite_channel_lock, flags);
+	if (devpriv->ao_mite_chan) {
+		ni_set_ao_dma_channel(dev, -1);
+		a4l_mite_release_channel(devpriv->ao_mite_chan);
+		devpriv->ao_mite_chan = NULL;
+	}
+	rtdm_lock_put_irqrestore(&devpriv->mite_channel_lock, flags);
+
+}
+
+void ni_release_gpct_mite_channel(struct a4l_device *dev, unsigned gpct_index)
+{
+	unsigned long flags;
+
+	BUG_ON(gpct_index >= NUM_GPCT);
+	rtdm_lock_get_irqsave(&devpriv->mite_channel_lock, flags);
+	if (devpriv->counter_dev->counters[gpct_index]->mite_chan) {
+		struct mite_channel *mite_chan =
+			devpriv->counter_dev->counters[gpct_index]->mite_chan;
+
+		ni_set_gpct_dma_channel(dev, gpct_index, -1);
+		a4l_ni_tio_set_mite_channel(devpriv->counter_dev->
+					counters[gpct_index], NULL);
+		a4l_mite_release_channel(mite_chan);
+	}
+	rtdm_lock_put_irqrestore(&devpriv->mite_channel_lock, flags);
+
+}
+
+void ni_release_cdo_mite_channel(struct a4l_device *dev)
+{
+	unsigned long flags;
+
+	rtdm_lock_get_irqsave(&devpriv->mite_channel_lock, flags);
+	if (devpriv->cdo_mite_chan) {
+		ni_set_cdo_dma_channel(dev, -1);
+		a4l_mite_release_channel(devpriv->cdo_mite_chan);
+		devpriv->cdo_mite_chan = NULL;
+	}
+	rtdm_lock_put_irqrestore(&devpriv->mite_channel_lock, flags);
+
+}
+
+void ni_sync_ai_dma(struct a4l_subdevice *subd)
+{
+	struct a4l_device *dev = subd->dev;
+	unsigned long flags;
+
+	rtdm_lock_get_irqsave(&devpriv->mite_channel_lock, flags);
+	if (devpriv->ai_mite_chan)
+		a4l_mite_sync_input_dma(devpriv->ai_mite_chan, subd);
+	rtdm_lock_put_irqrestore(&devpriv->mite_channel_lock, flags);
+}
+
+void mite_handle_b_linkc(struct a4l_subdevice *subd)
+{
+	struct a4l_device *dev = subd->dev;
+	unsigned long flags;
+
+	rtdm_lock_get_irqsave(&devpriv->mite_channel_lock, flags);
+	if (devpriv->ao_mite_chan)
+		a4l_mite_sync_output_dma(devpriv->ao_mite_chan, subd);
+	rtdm_lock_put_irqrestore(&devpriv->mite_channel_lock, flags);
+}
+
+static int ni_ao_wait_for_dma_load(struct a4l_subdevice *subd)
+{
+	static const int timeout = 10000;
+
+	struct a4l_device *dev = subd->dev;
+	struct a4l_buffer *buf = subd->buf;
+
+	int i;
+
+	for (i = 0; i < timeout; i++) {
+
+		int buffer_filled;
+		unsigned short b_status;
+
+		b_status = devpriv->stc_readw(dev, AO_Status_1_Register);
+
+		buffer_filled = test_bit(A4L_BUF_EOA_NR, &buf->flags);
+		buffer_filled |= (b_status & AO_FIFO_Half_Full_St);
+
+		if (buffer_filled)
+			break;
+
+		/* If we poll too often, the pci bus activity seems
+		   to slow the dma transfer down */
+		a4l_udelay(10);
+	}
+
+	if (i == timeout) {
+		a4l_err(dev,
+			"ni_ao_wait_for_dma_load: "
+			"timed out waiting for dma load");
+		return -EPIPE;
+	}
+
+	return 0;
+}
+
+
+#else /* !CONFIG_XENO_DRIVERS_ANALOGY_NI_MITE */
+
+static inline int ni_ai_drain_dma(struct a4l_subdevice *subd)
+{
+	return -ENOTSUPP;
+}
+
+static inline int ni_request_ai_mite_channel(struct a4l_device * dev)
+{
+	return -ENOTSUPP;
+}
+
+static inline int ni_request_ao_mite_channel(struct a4l_device * dev)
+{
+	return -ENOTSUPP;
+}
+
+static inline
+int ni_request_gpct_mite_channel(struct a4l_device * dev,
+				 unsigned gpct_index, int direction)
+{
+	return -ENOTSUPP;
+}
+
+static inline int ni_request_cdo_mite_channel(struct a4l_device *dev)
+{
+	return -ENOTSUPP;
+}
+
+#define ni_release_ai_mite_channel(x) do { } while (0)
+#define ni_release_ao_mite_channel(x) do { } while (0)
+#define ni_release_gpct_mite_channel(x) do { } while (0)
+#define ni_release_cdo_mite_channel(x) do { } while (0)
+#define ni_sync_ai_dma(x) do { } while (0)
+#define mite_handle_b_linkc(x) do { } while (0)
+
+static inline int ni_ao_wait_for_dma_load(struct a4l_subdevice *subd)
+{
+	return -ENOTSUPP;
+}
+
+#endif /* CONFIG_XENO_DRIVERS_ANALOGY_NI_MITE */
+
+/* E-series boards use the second irq signals to generate dma requests
+   for their counters */
+void ni_e_series_enable_second_irq(struct a4l_device *dev,
+				   unsigned gpct_index, short enable)
+{
+	if (boardtype.reg_type & ni_reg_m_series_mask)
+		return;
+	switch (gpct_index) {
+	case 0:
+		if (enable) {
+			devpriv->stc_writew(dev, G0_Gate_Second_Irq_Enable,
+					    Second_IRQ_A_Enable_Register);
+		} else {
+			devpriv->stc_writew(dev, 0,
+					    Second_IRQ_A_Enable_Register);
+		}
+		break;
+	case 1:
+		if (enable) {
+			devpriv->stc_writew(dev, G1_Gate_Second_Irq_Enable,
+					    Second_IRQ_B_Enable_Register);
+		} else {
+			devpriv->stc_writew(dev, 0,
+					    Second_IRQ_B_Enable_Register);
+		}
+		break;
+	default:
+		BUG();
+		break;
+	}
+}
+
+void ni_clear_ai_fifo(struct a4l_device *dev)
+{
+	if (boardtype.reg_type == ni_reg_6143) {
+		/* Flush the 6143 data FIFO */
+		ni_writel(0x10, AIFIFO_Control_6143); /* Flush fifo */
+		ni_writel(0x00, AIFIFO_Control_6143); /* Flush fifo */
+		while (ni_readl(AIFIFO_Status_6143) & 0x10); /* Wait for complete */
+	} else {
+		devpriv->stc_writew(dev, 1, ADC_FIFO_Clear);
+		if (boardtype.reg_type == ni_reg_625x) {
+			ni_writeb(0, M_Offset_Static_AI_Control(0));
+			ni_writeb(1, M_Offset_Static_AI_Control(0));
+		}
+	}
+}
+
+#define ao_win_out(data, addr) ni_ao_win_outw(dev, data, addr)
+static inline void ni_ao_win_outw(struct a4l_device *dev, uint16_t data, int addr)
+{
+	unsigned long flags;
+
+	rtdm_lock_get_irqsave(&devpriv->window_lock, flags);
+	ni_writew(addr, AO_Window_Address_611x);
+	ni_writew(data, AO_Window_Data_611x);
+	rtdm_lock_put_irqrestore(&devpriv->window_lock, flags);
+}
+
+static inline void ni_ao_win_outl(struct a4l_device *dev, uint32_t data, int addr)
+{
+	unsigned long flags;
+
+	rtdm_lock_get_irqsave(&devpriv->window_lock, flags);
+	ni_writew(addr, AO_Window_Address_611x);
+	ni_writel(data, AO_Window_Data_611x);
+	rtdm_lock_put_irqrestore(&devpriv->window_lock, flags);
+}
+
+static inline unsigned short ni_ao_win_inw(struct a4l_device *dev, int addr)
+{
+	unsigned long flags;
+	unsigned short data;
+
+	rtdm_lock_get_irqsave(&devpriv->window_lock, flags);
+	ni_writew(addr, AO_Window_Address_611x);
+	data = ni_readw(AO_Window_Data_611x);
+	rtdm_lock_put_irqrestore(&devpriv->window_lock, flags);
+	return data;
+}
+
+/*
+ * ni_set_bits( ) allows different parts of the ni_mio_common driver
+ * to share registers (such as Interrupt_A_Register) without interfering
+ * with each other.
+ *
+ * NOTE: the switch/case statements are optimized out for a constant
+ * argument, so this is actually quite fast.  If you must wrap another
+ * function around this, make it inline to avoid a large speed penalty.
+ *
+ * value should only be 1 or 0.
+ */
+
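+/*
+ * For example, the AI reset code below disables a whole set of
+ * Interrupt_A sources in one shot with:
+ *
+ *   ni_set_bits(dev, Interrupt_A_Enable_Register,
+ *               AI_SC_TC_Interrupt_Enable | AI_START1_Interrupt_Enable |
+ *               ..., 0);
+ */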
+static inline void ni_set_bits(struct a4l_device *dev,
+			       int reg, unsigned bits, unsigned value)
+{
+	unsigned bit_values;
+
+	if (value)
+		bit_values = bits;
+	else
+		bit_values = 0;
+
+	ni_set_bitfield(dev, reg, bits, bit_values);
+}
+
+static void shutdown_ai_command(struct a4l_subdevice *subd)
+{
+	ni_ai_drain_dma(subd);
+	ni_handle_fifo_dregs(subd);
+	get_last_sample_611x(subd);
+	get_last_sample_6143(subd);
+
+	/* TODO: stop the acquisition */
+}
+
+static void ni_handle_eos(struct a4l_subdevice *subd)
+{
+	struct a4l_device *dev = subd->dev;
+
+	if (devpriv->aimode == AIMODE_SCAN) {
+		static const int timeout = 10;
+		int i;
+
+		for (i = 0; i < timeout; i++) {
+			ni_sync_ai_dma(subd);
+			/* TODO: stop when the transfer is really over */
+			a4l_udelay(1);
+		}
+	}
+
+	/* Handle special case of single scan using AI_End_On_End_Of_Scan */
+	if ((devpriv->ai_cmd2 & AI_End_On_End_Of_Scan)) {
+		shutdown_ai_command(subd);
+	}
+}
+
+static void ni_event(struct a4l_subdevice * subd)
+{
+	/* Temporary hack */
+	struct a4l_buffer *buf = subd->buf;
+
+	if(test_bit(A4L_BUF_ERROR_NR, &buf->flags)) {
+		if (subd->cancel != NULL)
+			subd->cancel(subd);
+	}
+
+	a4l_buf_evt(subd, 0);
+}
+
+static void handle_gpct_interrupt(struct a4l_device *dev, unsigned short counter_index)
+{
+#if (defined(CONFIG_XENO_DRIVERS_ANALOGY_NI_MITE) || \
+     defined(CONFIG_XENO_DRIVERS_ANALOGY_NI_MITE_MODULE))
+	struct ni_gpct *counter = devpriv->counter_dev->counters[counter_index];
+	a4l_ni_tio_handle_interrupt(counter, dev);
+#endif /* CONFIG_XENO_DRIVERS_ANALOGY_NI_MITE */
+}
+
+#ifdef CONFIG_DEBUG_MIO_COMMON
+static const char *const status_a_strings[] = {
+	"passthru0", "fifo", "G0_gate", "G0_TC",
+	"stop", "start", "sc_tc", "start1",
+	"start2", "sc_tc_error", "overflow", "overrun",
+	"fifo_empty", "fifo_half_full", "fifo_full", "interrupt_a"
+};
+
+static void ni_mio_print_status_a(int status)
+{
+	int i;
+
+	__a4l_info("A status:");
+	for (i = 15; i >= 0; i--) {
+		if (status & (1 << i)) {
+			__a4l_info(" %s", status_a_strings[i]);
+		}
+	}
+	__a4l_info("\n");
+}
+
+static const char *const status_b_strings[] = {
+	"passthru1", "fifo", "G1_gate", "G1_TC",
+	"UI2_TC", "UPDATE", "UC_TC", "BC_TC",
+	"start1", "overrun", "start", "bc_tc_error",
+	"fifo_empty", "fifo_half_full", "fifo_full", "interrupt_b"
+};
+
+static void ni_mio_print_status_b(int status)
+{
+	int i;
+
+	__a4l_info("B status:");
+	for (i = 15; i >= 0; i--) {
+		if (status & (1 << i)) {
+			__a4l_info(" %s", status_b_strings[i]);
+		}
+	}
+	__a4l_info("\n");
+}
+
+#else /* !CONFIG_DEBUG_MIO_COMMON */
+
+#define ni_mio_print_status_a(x)
+#define ni_mio_print_status_b(x)
+
+#endif /* CONFIG_DEBUG_MIO_COMMON */
+
+static void ack_a_interrupt(struct a4l_device *dev, unsigned short a_status)
+{
+	unsigned short ack = 0;
+
+	if (a_status & AI_SC_TC_St) {
+		ack |= AI_SC_TC_Interrupt_Ack;
+	}
+	if (a_status & AI_START1_St) {
+		ack |= AI_START1_Interrupt_Ack;
+	}
+	if (a_status & AI_START_St) {
+		ack |= AI_START_Interrupt_Ack;
+	}
+	if (a_status & AI_STOP_St) {
+		/* not sure why we used to ack the START here also,
+		   instead of doing it independently. Frank Hess
+		   2007-07-06 */
+		ack |= AI_STOP_Interrupt_Ack;
+	}
+	if (ack)
+		devpriv->stc_writew(dev, ack, Interrupt_A_Ack_Register);
+}
+
+static void handle_a_interrupt(struct a4l_device *dev,
+			       unsigned short status, unsigned int ai_mite_status)
+{
+
+	struct a4l_subdevice *subd = a4l_get_subd(dev, NI_AI_SUBDEV);
+
+	/* 67xx boards don't have an AI subdevice, but their gpct0
+	   might generate an A interrupt. */
+
+	if((subd->flags & A4L_SUBD_TYPES) == A4L_SUBD_UNUSED)
+		return;
+
+	a4l_dbg(1, drv_dbg, dev, "ni_mio_common: interrupt: "
+		"a_status=%04x ai_mite_status=%08x\n", status, ai_mite_status);
+	ni_mio_print_status_a(status);
+
+#if (defined(CONFIG_XENO_DRIVERS_ANALOGY_NI_MITE) || \
+     defined(CONFIG_XENO_DRIVERS_ANALOGY_NI_MITE_MODULE))
+	if (ai_mite_status & CHSR_LINKC)
+		ni_sync_ai_dma(subd);
+
+	if (ai_mite_status & ~(CHSR_INT | CHSR_LINKC | CHSR_DONE | CHSR_MRDY |
+			       CHSR_DRDY | CHSR_DRQ1 | CHSR_DRQ0 | CHSR_ERROR |
+			       CHSR_SABORT | CHSR_XFERR | CHSR_LxERR_mask)) {
+		a4l_dbg(1, drv_dbg, dev, "ni_mio_common: interrupt: "
+			"unknown mite interrupt, ack! (ai_mite_status=%08x)\n",
+			ai_mite_status);
+		a4l_buf_evt(subd, A4L_BUF_ERROR);
+	}
+#endif /* CONFIG_XENO_DRIVERS_ANALOGY_NI_MITE */
+
+	/* Test for all uncommon interrupt events at the same time */
+	if (status & (AI_Overrun_St | AI_Overflow_St | AI_SC_TC_Error_St |
+		      AI_SC_TC_St | AI_START1_St)) {
+		if (status == 0xffff) {
+			a4l_dbg(1, drv_dbg, dev, "ni_mio_common: interrupt: "
+				"a_status=0xffff.  Card removed?\n");
+			/* TODO: we probably aren't even running a command now,
+			   so it's a good idea to be careful.
+			   we should check the transfer status */
+			a4l_buf_evt(subd, A4L_BUF_ERROR);
+			ni_event(subd);
+			return;
+		}
+		if (status & (AI_Overrun_St | AI_Overflow_St |
+			      AI_SC_TC_Error_St)) {
+			a4l_dbg(1, drv_dbg, dev, "ni_mio_common: interrupt: "
+				"ai error a_status=%04x\n", status);
+			ni_mio_print_status_a(status);
+
+			shutdown_ai_command(subd);
+
+			a4l_buf_evt(subd, A4L_BUF_ERROR);
+			ni_event(subd);
+
+			return;
+		}
+		if (status & AI_SC_TC_St) {
+			a4l_dbg(1, drv_dbg, dev, "ni_mio_common: SC_TC interrupt\n");
+			if (!devpriv->ai_continuous) {
+				shutdown_ai_command(subd);
+			}
+		}
+	}
+
+#if (!defined(CONFIG_XENO_DRIVERS_ANALOGY_NI_MITE) && \
+     !defined(CONFIG_XENO_DRIVERS_ANALOGY_NI_MITE_MODULE))
+
+	if (status & AI_FIFO_Half_Full_St) {
+		int i;
+		static const int timeout = 10;
+		/* PCMCIA cards (at least 6036) seem to stop producing
+		   interrupts if we fail to get the fifo less than half
+		   full, so loop to be sure. */
+		for (i = 0; i < timeout; ++i) {
+			ni_handle_fifo_half_full(subd);
+			if ((devpriv->stc_readw(dev, AI_Status_1_Register) &
+			     AI_FIFO_Half_Full_St) == 0)
+				break;
+		}
+	}
+#endif /* !CONFIG_XENO_DRIVERS_ANALOGY_NI_MITE */
+
+	if ((status & AI_STOP_St)) {
+		ni_handle_eos(subd);
+	}
+
+	ni_event(subd);
+
+	status = devpriv->stc_readw(dev, AI_Status_1_Register);
+	if (status & Interrupt_A_St)
+		a4l_dbg(1, drv_dbg, dev, "ni_mio_common: interrupt: "
+			" didn't clear interrupt? status=0x%x\n", status);
+}
+
+static void ack_b_interrupt(struct a4l_device *dev, unsigned short b_status)
+{
+	unsigned short ack = 0;
+	if (b_status & AO_BC_TC_St) {
+		ack |= AO_BC_TC_Interrupt_Ack;
+	}
+	if (b_status & AO_Overrun_St) {
+		ack |= AO_Error_Interrupt_Ack;
+	}
+	if (b_status & AO_START_St) {
+		ack |= AO_START_Interrupt_Ack;
+	}
+	if (b_status & AO_START1_St) {
+		ack |= AO_START1_Interrupt_Ack;
+	}
+	if (b_status & AO_UC_TC_St) {
+		ack |= AO_UC_TC_Interrupt_Ack;
+	}
+	if (b_status & AO_UI2_TC_St) {
+		ack |= AO_UI2_TC_Interrupt_Ack;
+	}
+	if (b_status & AO_UPDATE_St) {
+		ack |= AO_UPDATE_Interrupt_Ack;
+	}
+	if (ack)
+		devpriv->stc_writew(dev, ack, Interrupt_B_Ack_Register);
+}
+
+static void handle_b_interrupt(struct a4l_device * dev,
+			       unsigned short b_status, unsigned int ao_mite_status)
+{
+
+	struct a4l_subdevice *subd = a4l_get_subd(dev, NI_AO_SUBDEV);
+
+	a4l_dbg(1, drv_dbg, dev,
+		"ni_mio_common: interrupt: b_status=%04x m1_status=%08x\n",
+		b_status, ao_mite_status);
+
+	ni_mio_print_status_b(b_status);
+
+	if (b_status == 0xffff)
+		return;
+
+	if (b_status & AO_Overrun_St) {
+		a4l_err(dev,
+			"ni_mio_common: interrupt: "
+			"AO FIFO underrun status=0x%04x status2=0x%04x\n",
+			b_status,
+			devpriv->stc_readw(dev, AO_Status_2_Register));
+		a4l_buf_evt(subd, A4L_BUF_ERROR);
+	}
+
+	if (b_status & AO_BC_TC_St) {
+		a4l_dbg(1, drv_dbg, dev,
+			"ni_mio_common: interrupt: "
+			"AO BC_TC status=0x%04x status2=0x%04x\n",
+			b_status, devpriv->stc_readw(dev, AO_Status_2_Register));
+		a4l_buf_evt(subd, A4L_BUF_EOA);
+	}
+
+#if (defined(CONFIG_XENO_DRIVERS_ANALOGY_NI_MITE) || \
+     defined(CONFIG_XENO_DRIVERS_ANALOGY_NI_MITE_MODULE))
+
+	if (ao_mite_status & CHSR_STOPS) {
+		a4l_dbg(1, drv_dbg, dev,
+			"ni_mio_common: interrupt: MITE transfer stopped\n");
+	} else if (ao_mite_status & CHSR_LINKC) {
+		/* Currently, mite.c requires us to handle LINKC */
+		mite_handle_b_linkc(subd);
+	}
+
+	if (ao_mite_status &
+	    ~(CHSR_INT | CHSR_LINKC | CHSR_DONE | CHSR_MRDY |
+	      CHSR_DRDY | CHSR_DRQ1 | CHSR_DRQ0 | CHSR_ERROR |
+	      CHSR_SABORT | CHSR_STOPS | CHSR_XFERR | CHSR_LxERR_mask)) {
+		a4l_err(dev,
+			"unknown mite interrupt, ack! (ao_mite_status=%08x)\n",
+			 ao_mite_status);
+		a4l_buf_evt(subd, A4L_BUF_ERROR);
+	}
+#endif /* CONFIG_XENO_DRIVERS_ANALOGY_NI_MITE */
+
+#if (!defined(CONFIG_XENO_DRIVERS_ANALOGY_NI_MITE) && \
+     !defined(CONFIG_XENO_DRIVERS_ANALOGY_NI_MITE_MODULE))
+
+	if (b_status & AO_FIFO_Request_St) {
+		int ret;
+
+		ret = ni_ao_fifo_half_empty(subd);
+		if (!ret) {
+			a4l_err(dev,
+				"ni_mio_common: "
+				"interrupt: AO buffer underrun\n");
+			ni_set_bits(dev, Interrupt_B_Enable_Register,
+				    AO_FIFO_Interrupt_Enable |
+				    AO_Error_Interrupt_Enable, 0);
+			a4l_buf_evt(subd, A4L_BUF_ERROR);
+		}
+	}
+#endif /* CONFIG_XENO_DRIVERS_ANALOGY_NI_MITE */
+
+	ni_event(subd);
+}
+
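+/*
+ * Top-level IRQ handler for E/M series boards: it samples the A and B
+ * status registers (and the MITE channel status when DMA is in use),
+ * acknowledges the pending conditions, then dispatches to the AI (A),
+ * AO (B), counter and CDIO sub-handlers, all under the device lock.
+ */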
+int a4l_ni_E_interrupt(unsigned int irq, void *d)
+{
+	struct a4l_device *dev = d;
+	unsigned short a_status;
+	unsigned short b_status;
+	unsigned int ai_mite_status = 0;
+	unsigned int ao_mite_status = 0;
+	unsigned long flags;
+	struct mite_struct *mite = devpriv->mite;
+
+	/* Make sure dev->attached is checked before handler does
+	   anything else. */
+	smp_mb();
+
+	/* lock to avoid race with a4l_poll */
+	rtdm_lock_get_irqsave(&dev->lock, flags);
+	a_status = devpriv->stc_readw(dev, AI_Status_1_Register);
+	b_status = devpriv->stc_readw(dev, AO_Status_1_Register);
+	if (mite) {
+#if (defined(CONFIG_XENO_DRIVERS_ANALOGY_NI_MITE) || \
+     defined(CONFIG_XENO_DRIVERS_ANALOGY_NI_MITE_MODULE))
+		rtdm_lock_get(&devpriv->mite_channel_lock);
+		if (devpriv->ai_mite_chan) {
+			ai_mite_status = a4l_mite_get_status(devpriv->ai_mite_chan);
+			if (ai_mite_status & CHSR_LINKC)
+				writel(CHOR_CLRLC,
+				       devpriv->mite->mite_io_addr +
+				       MITE_CHOR(devpriv->ai_mite_chan->channel));
+		}
+		if (devpriv->ao_mite_chan) {
+			ao_mite_status = a4l_mite_get_status(devpriv->ao_mite_chan);
+			if (ao_mite_status & CHSR_LINKC)
+				writel(CHOR_CLRLC,
+				       mite->mite_io_addr +
+				       MITE_CHOR(devpriv->ao_mite_chan->channel));
+		}
+		rtdm_lock_put(&devpriv->mite_channel_lock);
+#endif /* CONFIG_XENO_DRIVERS_ANALOGY_NI_MITE */
+	}
+	ack_a_interrupt(dev, a_status);
+	ack_b_interrupt(dev, b_status);
+	if ((a_status & Interrupt_A_St) || (ai_mite_status & CHSR_INT))
+		handle_a_interrupt(dev, a_status, ai_mite_status);
+	if ((b_status & Interrupt_B_St) || (ao_mite_status & CHSR_INT))
+		handle_b_interrupt(dev, b_status, ao_mite_status);
+	handle_gpct_interrupt(dev, 0);
+	handle_gpct_interrupt(dev, 1);
+	handle_cdio_interrupt(dev);
+
+	rtdm_lock_put_irqrestore(&dev->lock, flags);
+	return 0;
+}
+
+#if (!defined(CONFIG_XENO_DRIVERS_ANALOGY_NI_MITE) && \
+     !defined(CONFIG_XENO_DRIVERS_ANALOGY_NI_MITE_MODULE))
+
+static void ni_ao_fifo_load(struct a4l_subdevice *subd, int n)
+{
+	struct a4l_device *dev = subd->dev;
+	sampl_t d;
+	u32 packed_data;
+	int i, err = 1;
+
+	for (i = 0; i < n; i++) {
+		err = a4l_buf_get(subd, &d, sizeof(sampl_t));
+		if (err != 0)
+			break;
+
+		if (boardtype.reg_type & ni_reg_6xxx_mask) {
+			packed_data = d & 0xffff;
+			/* 6711 only has 16 bit wide ao fifo */
+			if (boardtype.reg_type != ni_reg_6711) {
+				err = a4l_buf_get(subd, &d, sizeof(sampl_t));
+				if (err != 0)
+					break;
+				i++;
+				packed_data |= (d << 16) & 0xffff0000;
+			}
+			ni_writel(packed_data, DAC_FIFO_Data_611x);
+		} else {
+			ni_writew(d, DAC_FIFO_Data);
+		}
+	}
+	if (err != 0) {
+		a4l_buf_evt(subd, A4L_BUF_ERROR);
+	}
+}
+
+/*
+ *  There's a small problem if the FIFO gets really low and we
+ *  don't have the data to fill it.  Basically, if after we fill
+ *  the FIFO with all the data available, the FIFO is _still_
+ *  less than half full, we never clear the interrupt.  If the
+ *  IRQ is in edge mode, we never get another interrupt, because
+ *  this one wasn't cleared.  If in level mode, we get flooded
+ *  with interrupts that we can't fulfill, because nothing ever
+ *  gets put into the buffer.
+ *
+ *  This kind of situation is recoverable, but it is easier to
+ *  just pretend we had a FIFO underrun, since there is a good
+ *  chance it will happen anyway.  This is _not_ the case for
+ *  RT code, as RT code might purposely be running close to the
+ *  metal.  Needs to be fixed eventually.
+ */
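+/*
+ * Returns 0 (and flags a buffer error) when no data is available to
+ * refill the AO FIFO, 1 once up to half a FIFO worth of samples has
+ * been loaded.
+ */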
+static int ni_ao_fifo_half_empty(struct a4l_subdevice *subd)
+{
+	struct a4l_device *dev = subd->dev;
+	int n;
+
+	n = a4l_buf_count(subd);
+	if (n == 0) {
+		a4l_buf_evt(subd, A4L_BUF_ERROR);
+		return 0;
+	}
+
+	n /= sizeof(sampl_t);
+	if (n > boardtype.ao_fifo_depth / 2)
+		n = boardtype.ao_fifo_depth / 2;
+
+	ni_ao_fifo_load(subd, n);
+
+	return 1;
+}
+
+static int ni_ao_prep_fifo(struct a4l_subdevice *subd)
+{
+	struct a4l_device *dev = subd->dev;
+	int n;
+
+	/* Reset fifo */
+	devpriv->stc_writew(dev, 1, DAC_FIFO_Clear);
+	if (boardtype.reg_type & ni_reg_6xxx_mask)
+		ni_ao_win_outl(dev, 0x6, AO_FIFO_Offset_Load_611x);
+
+	/* Load some data */
+	n = a4l_buf_count(subd);
+	if (n == 0)
+		return 0;
+
+	n /= sizeof(sampl_t);
+	if (n > boardtype.ao_fifo_depth)
+		n = boardtype.ao_fifo_depth;
+
+	ni_ao_fifo_load(subd, n);
+
+	return n;
+}
+
+static void ni_ai_fifo_read(struct a4l_subdevice *subd, int n)
+{
+	struct a4l_device *dev = subd->dev;
+	int i;
+
+	if (boardtype.reg_type == ni_reg_611x) {
+		sampl_t data[2];
+		u32 dl;
+
+		for (i = 0; i < n / 2; i++) {
+			dl = ni_readl(ADC_FIFO_Data_611x);
+			/* This may get the hi/lo data in the wrong order */
+			data[0] = (dl >> 16) & 0xffff;
+			data[1] = dl & 0xffff;
+			a4l_buf_put(subd, data, sizeof(sampl_t) * 2);
+		}
+		/* Check if there's a single sample stuck in the FIFO */
+		if (n % 2) {
+			dl = ni_readl(ADC_FIFO_Data_611x);
+			data[0] = dl & 0xffff;
+			a4l_buf_put(subd, &data[0], sizeof(sampl_t));
+		}
+	} else if (boardtype.reg_type == ni_reg_6143) {
+		sampl_t data[2];
+		u32 dl;
+
+		/* This just reads the FIFO assuming the data is
+		   present, no checks on the FIFO status are performed */
+		for (i = 0; i < n / 2; i++) {
+			dl = ni_readl(AIFIFO_Data_6143);
+
+			data[0] = (dl >> 16) & 0xffff;
+			data[1] = dl & 0xffff;
+			a4l_buf_put(subd, data, sizeof(sampl_t) * 2);
+		}
+		if (n % 2) {
+			/* Assume there is a single sample stuck in the FIFO.
+			   Get stranded sample into FIFO */
+			ni_writel(0x01, AIFIFO_Control_6143);
+			dl = ni_readl(AIFIFO_Data_6143);
+			data[0] = (dl >> 16) & 0xffff;
+			a4l_buf_put(subd, &data[0], sizeof(sampl_t));
+		}
+	} else {
+		if (n > sizeof(devpriv->ai_fifo_buffer) /
+		    sizeof(devpriv->ai_fifo_buffer[0])) {
+			a4l_err(dev,
+				"ni_ai_fifo_read: "
+				"bug! ai_fifo_buffer too small");
+			a4l_buf_evt(subd, A4L_BUF_ERROR);
+			return;
+		}
+		for (i = 0; i < n; i++) {
+			devpriv->ai_fifo_buffer[i] =
+				ni_readw(ADC_FIFO_Data_Register);
+		}
+		a4l_buf_put(subd,
+			    devpriv->ai_fifo_buffer,
+			    n * sizeof(devpriv->ai_fifo_buffer[0]));
+	}
+}
+
+static void ni_handle_fifo_half_full(struct a4l_subdevice *subd)
+{
+	struct a4l_device *dev = subd->dev;
+	ni_ai_fifo_read(subd, boardtype.ai_fifo_depth / 2);
+}
+
+#endif /* !CONFIG_XENO_DRIVERS_ANALOGY_NI_MITE */
+
+#if (defined(CONFIG_XENO_DRIVERS_ANALOGY_NI_MITE) || \
+     defined(CONFIG_XENO_DRIVERS_ANALOGY_NI_MITE_MODULE))
+
+static int ni_ai_drain_dma(struct a4l_subdevice *subd)
+{
+	int i;
+	static const int timeout = 10000;
+	unsigned long flags;
+	int retval = 0;
+	struct a4l_device *dev = subd->dev;
+
+	rtdm_lock_get_irqsave(&devpriv->mite_channel_lock, flags);
+	if (devpriv->ai_mite_chan) {
+		for (i = 0; i < timeout; i++) {
+			if ((devpriv->stc_readw(dev, AI_Status_1_Register) &
+			     AI_FIFO_Empty_St) &&
+			    a4l_mite_bytes_in_transit(devpriv->ai_mite_chan) == 0)
+				break;
+			a4l_udelay(5);
+		}
+		if (i == timeout) {
+			a4l_info(dev, "wait for dma drain timed out\n");
+
+			a4l_info(dev, "a4l_mite_bytes_in_transit=%i, "
+				 "AI_Status1_Register=0x%x\n",
+				 a4l_mite_bytes_in_transit(devpriv->ai_mite_chan),
+				 devpriv->stc_readw(dev, AI_Status_1_Register));
+			retval = -1;
+		}
+	}
+	rtdm_lock_put_irqrestore(&devpriv->mite_channel_lock, flags);
+
+	ni_sync_ai_dma(subd);
+
+	return retval;
+}
+
+#endif /* CONFIG_XENO_DRIVERS_ANALOGY_NI_MITE */
+
+/* Empties the AI fifo */
+static void ni_handle_fifo_dregs(struct a4l_subdevice *subd)
+{
+	sampl_t data[2];
+	u32 dl;
+	short fifo_empty;
+	int i;
+	struct a4l_device *dev = subd->dev;
+
+	if (boardtype.reg_type == ni_reg_611x) {
+		while ((devpriv->stc_readw(dev,
+					   AI_Status_1_Register) &
+			AI_FIFO_Empty_St) == 0) {
+			dl = ni_readl(ADC_FIFO_Data_611x);
+
+			/* This may get the hi/lo data in the wrong order */
+			data[0] = (dl >> 16);
+			data[1] = (dl & 0xffff);
+			a4l_buf_put(subd, data, sizeof(sampl_t) * 2);
+		}
+	} else if (boardtype.reg_type == ni_reg_6143) {
+		i = 0;
+		while (ni_readl(AIFIFO_Status_6143) & 0x04) {
+			dl = ni_readl(AIFIFO_Data_6143);
+
+			/* This may get the hi/lo data in the wrong order */
+			data[0] = (dl >> 16);
+			data[1] = (dl & 0xffff);
+			a4l_buf_put(subd, data, sizeof(sampl_t) * 2);
+			i += 2;
+		}
+		/* Check if a stranded sample is present */
+		if (ni_readl(AIFIFO_Status_6143) & 0x01) {
+			/* Get the stranded sample into the FIFO */
+			ni_writel(0x01, AIFIFO_Control_6143);
+			dl = ni_readl(AIFIFO_Data_6143);
+			data[0] = (dl >> 16) & 0xffff;
+			a4l_buf_put(subd, &data[0], sizeof(sampl_t));
+		}
+
+	} else {
+		fifo_empty = devpriv->stc_readw(dev, AI_Status_1_Register) &
+			AI_FIFO_Empty_St;
+		while (fifo_empty == 0) {
+			for (i = 0;
+			     i < sizeof(devpriv->ai_fifo_buffer) /
+				 sizeof(devpriv->ai_fifo_buffer[0]); i++) {
+				fifo_empty =
+					devpriv->stc_readw(dev,
+							   AI_Status_1_Register) &
+					AI_FIFO_Empty_St;
+				if (fifo_empty)
+					break;
+				devpriv->ai_fifo_buffer[i] =
+					ni_readw(ADC_FIFO_Data_Register);
+			}
+			a4l_buf_put(subd,
+				    devpriv->ai_fifo_buffer,
+				    i * sizeof(devpriv->ai_fifo_buffer[0]));
+		}
+	}
+}
+
+static void get_last_sample_611x(struct a4l_subdevice *subd)
+{
+	sampl_t data;
+	u32 dl;
+	struct a4l_device *dev = subd->dev;
+
+	if (boardtype.reg_type != ni_reg_611x)
+		return;
+
+	/* Check if there's a single sample stuck in the FIFO */
+	if (ni_readb(XXX_Status) & 0x80) {
+		dl = ni_readl(ADC_FIFO_Data_611x);
+		data = (dl & 0xffff);
+		a4l_buf_put(subd, &data, sizeof(sampl_t));
+	}
+}
+
+static void get_last_sample_6143(struct a4l_subdevice *subd)
+{
+	sampl_t data;
+	u32 dl;
+	struct a4l_device *dev = subd->dev;
+
+	if (boardtype.reg_type != ni_reg_6143)
+		return;
+
+	/* Check if there's a single sample stuck in the FIFO */
+	if (ni_readl(AIFIFO_Status_6143) & 0x01) {
+		/* Get stranded sample into FIFO */
+		ni_writel(0x01, AIFIFO_Control_6143);
+		dl = ni_readl(AIFIFO_Data_6143);
+
+		/* This may get the hi/lo data in the wrong order */
+		data = (dl >> 16) & 0xffff;
+		a4l_buf_put(subd, &data, sizeof(sampl_t));
+	}
+}
+
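+/*
+ * The munge callbacks post-process raw AI samples in place: with MITE
+ * DMA enabled the data arrives little-endian and is first converted to
+ * host order, then the per-channel offset recorded when the
+ * channel/gain list was loaded is added, shifting bipolar samples into
+ * the unsigned range.
+ */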
+static void ni_ai_munge16(struct a4l_subdevice *subd, void *buf, unsigned long size)
+{
+	struct a4l_device *dev = subd->dev;
+	struct a4l_cmd_desc *cmd = a4l_get_cmd(subd);
+	int chan_idx = a4l_get_chan(subd);
+	unsigned int i;
+	sampl_t *array = buf;
+
+	for (i = 0; i < size / sizeof(sampl_t); i++) {
+#if (defined(CONFIG_XENO_DRIVERS_ANALOGY_NI_MITE) || \
+     defined(CONFIG_XENO_DRIVERS_ANALOGY_NI_MITE_MODULE))
+		array[i] = le16_to_cpu(array[i]);
+#endif /* CONFIG_XENO_DRIVERS_ANALOGY_NI_MITE */
+		array[i] += devpriv->ai_offset[chan_idx];
+		chan_idx++;
+		chan_idx %= cmd->nb_chan;
+	}
+}
+
+static void ni_ai_munge32(struct a4l_subdevice *subd, void *buf, unsigned long size)
+{
+	struct a4l_device *dev = subd->dev;
+	struct a4l_cmd_desc *cmd = a4l_get_cmd(subd);
+	int chan_idx = a4l_get_chan(subd);
+	unsigned int i;
+	lsampl_t *larray = buf;
+
+	for (i = 0; i < size / sizeof(lsampl_t); i++) {
+#if (defined(CONFIG_XENO_DRIVERS_ANALOGY_NI_MITE) || \
+     defined(CONFIG_XENO_DRIVERS_ANALOGY_NI_MITE_MODULE))
+		larray[i] = le32_to_cpu(larray[i]);
+#endif /* CONFIG_XENO_DRIVERS_ANALOGY_NI_MITE */
+		larray[i] += devpriv->ai_offset[chan_idx];
+		chan_idx++;
+		chan_idx %= cmd->nb_chan;
+	}
+}
+
+#if (defined(CONFIG_XENO_DRIVERS_ANALOGY_NI_MITE) || \
+     defined(CONFIG_XENO_DRIVERS_ANALOGY_NI_MITE_MODULE))
+
+static int ni_ai_setup_MITE_dma(struct a4l_subdevice *subd)
+{
+	struct a4l_device *dev = subd->dev;
+	unsigned long flags;
+	int err;
+
+	err = ni_request_ai_mite_channel(dev);
+	if (err < 0)
+		return err;
+
+	err = a4l_mite_buf_change(devpriv->ai_mite_chan->ring, subd);
+	if (err < 0)
+		return err;
+
+	rtdm_lock_get_irqsave(&devpriv->mite_channel_lock, flags);
+
+	switch (boardtype.reg_type) {
+	case ni_reg_611x:
+	case ni_reg_6143:
+		a4l_mite_prep_dma(devpriv->ai_mite_chan, 32, 16);
+		break;
+	case ni_reg_628x:
+		a4l_mite_prep_dma(devpriv->ai_mite_chan, 32, 32);
+		break;
+	default:
+		a4l_mite_prep_dma(devpriv->ai_mite_chan, 16, 16);
+		break;
+	}
+
+	/* start the MITE */
+	a4l_mite_dma_arm(devpriv->ai_mite_chan);
+
+	rtdm_lock_put_irqrestore(&devpriv->mite_channel_lock, flags);
+
+	return 0;
+}
+
+static int ni_ao_setup_MITE_dma(struct a4l_subdevice *subd)
+{
+	struct a4l_device *dev = subd->dev;
+	unsigned long flags;
+	int err;
+
+	err = ni_request_ao_mite_channel(dev);
+	if (err < 0)
+		return err;
+
+	err = a4l_mite_buf_change(devpriv->ao_mite_chan->ring, subd);
+	if (err < 0)
+		return err;
+
+	rtdm_lock_get_irqsave(&devpriv->mite_channel_lock, flags);
+
+	if (devpriv->ao_mite_chan) {
+
+		if (boardtype.reg_type & (ni_reg_611x | ni_reg_6713)) {
+			a4l_mite_prep_dma(devpriv->ao_mite_chan, 32, 32);
+		} else {
+			/* Doing 32 instead of 16 bit wide transfers
+			   from memory makes the mite do 32 bit pci
+			   transfers, doubling pci bandwidth. */
+			a4l_mite_prep_dma(devpriv->ao_mite_chan, 16, 32);
+		}
+		a4l_mite_dma_arm(devpriv->ao_mite_chan);
+	} else
+		err = -EIO;
+
+	rtdm_lock_put_irqrestore(&devpriv->mite_channel_lock, flags);
+
+	return err;
+}
+
+static int ni_cdo_setup_MITE_dma(struct a4l_subdevice *subd)
+{
+	struct a4l_device *dev = subd->dev;
+	unsigned long flags;
+	int err;
+
+	err = ni_request_cdo_mite_channel(dev);
+	if (err < 0)
+		return err;
+
+	/* No need to get a lock to setup the ring buffer */
+	err = a4l_mite_buf_change(devpriv->cdo_mite_chan->ring, subd);
+	if (err < 0)
+		return err;
+
+	rtdm_lock_get_irqsave(&devpriv->mite_channel_lock, flags);
+
+	/* This test should be useless but one never knows */
+	if (devpriv->cdo_mite_chan) {
+		/* Configure the DMA transfer */
+		a4l_mite_prep_dma(devpriv->cdo_mite_chan, 32, 32);
+		a4l_mite_dma_arm(devpriv->cdo_mite_chan);
+	} else
+		err = -EIO;
+
+	rtdm_lock_put_irqrestore(&devpriv->mite_channel_lock, flags);
+
+	return err;
+}
+
+#endif /* CONFIG_XENO_DRIVERS_ANALOGY_NI_MITE */
+
+static void ni_ai_reset(struct a4l_subdevice *subd)
+{
+	struct a4l_device *dev = subd->dev;
+
+	ni_release_ai_mite_channel(dev);
+
+	/* ai configuration */
+	devpriv->stc_writew(dev, AI_Configuration_Start | AI_Reset,
+			    Joint_Reset_Register);
+
+	ni_set_bits(dev, Interrupt_A_Enable_Register,
+		    AI_SC_TC_Interrupt_Enable | AI_START1_Interrupt_Enable |
+		    AI_START2_Interrupt_Enable | AI_START_Interrupt_Enable |
+		    AI_STOP_Interrupt_Enable | AI_Error_Interrupt_Enable |
+		    AI_FIFO_Interrupt_Enable, 0);
+
+	ni_clear_ai_fifo(dev);
+
+	if (boardtype.reg_type != ni_reg_6143)
+		ni_writeb(0, Misc_Command);
+
+	devpriv->stc_writew(dev, AI_Disarm, AI_Command_1_Register);	/* reset pulses */
+	devpriv->stc_writew(dev,
+			    AI_Start_Stop | AI_Mode_1_Reserved /*| AI_Trigger_Once */ ,
+			    AI_Mode_1_Register);
+	devpriv->stc_writew(dev, 0x0000, AI_Mode_2_Register);
+	/* generate FIFO interrupts on non-empty */
+	devpriv->stc_writew(dev, (0 << 6) | 0x0000, AI_Mode_3_Register);
+	if (boardtype.reg_type == ni_reg_611x) {
+		devpriv->stc_writew(dev, AI_SHIFTIN_Pulse_Width |
+				    AI_SOC_Polarity |
+				    AI_LOCALMUX_CLK_Pulse_Width, AI_Personal_Register);
+		devpriv->stc_writew(dev, AI_SCAN_IN_PROG_Output_Select(3) |
+				    AI_EXTMUX_CLK_Output_Select(0) |
+				    AI_LOCALMUX_CLK_Output_Select(2) |
+				    AI_SC_TC_Output_Select(3) |
+				    AI_CONVERT_Output_Select(AI_CONVERT_Output_Enable_High),
+				    AI_Output_Control_Register);
+	} else if (boardtype.reg_type == ni_reg_6143) {
+		devpriv->stc_writew(dev, AI_SHIFTIN_Pulse_Width |
+				    AI_SOC_Polarity |
+				    AI_LOCALMUX_CLK_Pulse_Width, AI_Personal_Register);
+		devpriv->stc_writew(dev, AI_SCAN_IN_PROG_Output_Select(3) |
+				    AI_EXTMUX_CLK_Output_Select(0) |
+				    AI_LOCALMUX_CLK_Output_Select(2) |
+				    AI_SC_TC_Output_Select(3) |
+				    AI_CONVERT_Output_Select(AI_CONVERT_Output_Enable_Low),
+				    AI_Output_Control_Register);
+	} else {
+		unsigned int ai_output_control_bits;
+		devpriv->stc_writew(dev, AI_SHIFTIN_Pulse_Width |
+				    AI_SOC_Polarity |
+				    AI_CONVERT_Pulse_Width |
+				    AI_LOCALMUX_CLK_Pulse_Width, AI_Personal_Register);
+		ai_output_control_bits = AI_SCAN_IN_PROG_Output_Select(3) |
+			AI_EXTMUX_CLK_Output_Select(0) |
+			AI_LOCALMUX_CLK_Output_Select(2) |
+			AI_SC_TC_Output_Select(3);
+		if (boardtype.reg_type == ni_reg_622x)
+			ai_output_control_bits |=
+				AI_CONVERT_Output_Select
+				(AI_CONVERT_Output_Enable_High);
+		else
+			ai_output_control_bits |=
+				AI_CONVERT_Output_Select
+				(AI_CONVERT_Output_Enable_Low);
+		devpriv->stc_writew(dev, ai_output_control_bits,
+				    AI_Output_Control_Register);
+	}
+
+	/* the following registers should not be changed, because there
+	 * are no backup registers in devpriv.  If you want to change
+	 * any of these, add a backup register and other appropriate code:
+	 *      AI_Mode_1_Register
+	 *      AI_Mode_3_Register
+	 *      AI_Personal_Register
+	 *      AI_Output_Control_Register
+	 */
+
+	/* clear interrupts */
+	devpriv->stc_writew(dev, AI_SC_TC_Error_Confirm | AI_START_Interrupt_Ack |
+			    AI_START2_Interrupt_Ack | AI_START1_Interrupt_Ack |
+			    AI_SC_TC_Interrupt_Ack | AI_Error_Interrupt_Ack |
+			    AI_STOP_Interrupt_Ack, Interrupt_A_Ack_Register);
+
+	devpriv->stc_writew(dev, AI_Configuration_End, Joint_Reset_Register);
+}
+
+static int ni_ai_insn_read(struct a4l_subdevice *subd, struct a4l_kernel_instruction *insn)
+{
+	struct a4l_device *dev = subd->dev;
+	const unsigned int mask = (1 << boardtype.adbits) - 1;
+	int i, n;
+	unsigned int signbits;
+	unsigned short d;
+	unsigned long dl;
+	uint16_t *data = (uint16_t *)insn->data;
+
+	ni_load_channelgain_list(dev, 1, &insn->chan_desc);
+
+	ni_clear_ai_fifo(dev);
+
+	signbits = devpriv->ai_offset[0];
+	if (boardtype.reg_type == ni_reg_611x) {
+		for (n = 0; n < num_adc_stages_611x; n++) {
+			devpriv->stc_writew(dev, AI_CONVERT_Pulse,
+					    AI_Command_1_Register);
+			a4l_udelay(1);
+		}
+		for (n = 0; n < insn->data_size / sizeof(uint16_t); n++) {
+			devpriv->stc_writew(dev, AI_CONVERT_Pulse,
+					    AI_Command_1_Register);
+			/* The 611x has screwy 32-bit FIFOs. */
+			d = 0;
+			for (i = 0; i < NI_TIMEOUT; i++) {
+				if (ni_readb(XXX_Status) & 0x80) {
+					d = (ni_readl(ADC_FIFO_Data_611x) >> 16)
+						& 0xffff;
+					break;
+				}
+				if (!(devpriv->stc_readw(dev,
+							 AI_Status_1_Register) &
+				      AI_FIFO_Empty_St)) {
+					d = ni_readl(ADC_FIFO_Data_611x) &
+						0xffff;
+					break;
+				}
+			}
+			if (i == NI_TIMEOUT) {
+				a4l_warn(dev,
+					 "ni_mio_common: "
+					 "timeout in 611x ni_ai_insn_read\n");
+				return -ETIME;
+			}
+			d += signbits;
+			data[n] = d;
+		}
+	} else if (boardtype.reg_type == ni_reg_6143) {
+		for (n = 0; n < insn->data_size / sizeof(uint16_t); n++) {
+			devpriv->stc_writew(dev, AI_CONVERT_Pulse,
+					    AI_Command_1_Register);
+
+			/* The 6143 has 32-bit FIFOs.
+			   You need to strobe a bit to move a single
+			   16-bit stranded sample into the FIFO */
+			dl = 0;
+			for (i = 0; i < NI_TIMEOUT; i++) {
+				if (ni_readl(AIFIFO_Status_6143) & 0x01) {
+					/* Get the stranded sample into the FIFO */
+					ni_writel(0x01, AIFIFO_Control_6143);
+					dl = ni_readl(AIFIFO_Data_6143);
+					break;
+				}
+			}
+			if (i == NI_TIMEOUT) {
+				a4l_warn(dev,
+					 "ni_mio_common: "
+					 "timeout in 6143 ni_ai_insn_read\n");
+				return -ETIME;
+			}
+			data[n] = (((dl >> 16) & 0xFFFF) + signbits) & 0xFFFF;
+		}
+	} else {
+		for (n = 0; n < insn->data_size / sizeof(uint16_t); n++) {
+			devpriv->stc_writew(dev, AI_CONVERT_Pulse,
+					    AI_Command_1_Register);
+			for (i = 0; i < NI_TIMEOUT; i++) {
+				if (!(devpriv->stc_readw(dev,
+							 AI_Status_1_Register) &
+				      AI_FIFO_Empty_St))
+					break;
+			}
+			if (i == NI_TIMEOUT) {
+				a4l_warn(dev,
+					 "ni_mio_common: "
+					 "timeout in ni_ai_insn_read\n");
+				return -ETIME;
+			}
+			if (boardtype.reg_type & ni_reg_m_series_mask) {
+				data[n] = ni_readl(M_Offset_AI_FIFO_Data) & mask;
+			} else {
+				d = ni_readw(ADC_FIFO_Data_Register);
+				/* subtle: needs to be short addition */
+				d += signbits;
+				data[n] = d;
+			}
+		}
+	}
+	return 0;
+}
+
+void ni_prime_channelgain_list(struct a4l_device *dev)
+{
+	int i;
+	devpriv->stc_writew(dev, AI_CONVERT_Pulse, AI_Command_1_Register);
+	for (i = 0; i < NI_TIMEOUT; ++i) {
+		if (!(devpriv->stc_readw(dev,
+					 AI_Status_1_Register) &
+		      AI_FIFO_Empty_St)) {
+			devpriv->stc_writew(dev, 1, ADC_FIFO_Clear);
+			return;
+		}
+		a4l_udelay(1);
+	}
+	a4l_warn(dev, "ni_mio_common: timeout loading channel/gain list\n");
+}
+
+static void ni_m_series_load_channelgain_list(struct a4l_device *dev,
+					      unsigned int n_chan,
+					      unsigned int *list)
+{
+	unsigned int chan, range, aref;
+	unsigned int i;
+	unsigned offset;
+	unsigned int dither;
+	unsigned range_code;
+
+	devpriv->stc_writew(dev, 1, Configuration_Memory_Clear);
+
+	if ((list[0] & CR_ALT_SOURCE)) {
+		unsigned bypass_bits;
+		chan = CR_CHAN(list[0]);
+		range = CR_RNG(list[0]);
+		range_code = ni_gainlkup[boardtype.gainlkup][range];
+		dither = ((list[0] & CR_ALT_FILTER) != 0);
+		bypass_bits = MSeries_AI_Bypass_Config_FIFO_Bit;
+		bypass_bits |= chan;
+		bypass_bits |= devpriv->ai_calib_source &
+			(MSeries_AI_Bypass_Cal_Sel_Pos_Mask |
+			 MSeries_AI_Bypass_Cal_Sel_Neg_Mask |
+			 MSeries_AI_Bypass_Mode_Mux_Mask |
+			 MSeries_AO_Bypass_AO_Cal_Sel_Mask);
+		bypass_bits |= MSeries_AI_Bypass_Gain_Bits(range_code);
+		if (dither)
+			bypass_bits |= MSeries_AI_Bypass_Dither_Bit;
+		/* don't use 2's complement encoding */
+		bypass_bits |= MSeries_AI_Bypass_Polarity_Bit;
+		ni_writel(bypass_bits, M_Offset_AI_Config_FIFO_Bypass);
+	} else {
+		ni_writel(0, M_Offset_AI_Config_FIFO_Bypass);
+	}
+	offset = 0;
+	for (i = 0; i < n_chan; i++) {
+		unsigned config_bits = 0;
+		chan = CR_CHAN(list[i]);
+		aref = CR_AREF(list[i]);
+		range = CR_RNG(list[i]);
+		dither = ((list[i] & CR_ALT_FILTER) != 0);
+
+		range_code = ni_gainlkup[boardtype.gainlkup][range];
+		devpriv->ai_offset[i] = offset;
+		switch (aref) {
+		case AREF_DIFF:
+			config_bits |=
+				MSeries_AI_Config_Channel_Type_Differential_Bits;
+			break;
+		case AREF_COMMON:
+			config_bits |=
+				MSeries_AI_Config_Channel_Type_Common_Ref_Bits;
+			break;
+		case AREF_GROUND:
+			config_bits |=
+				MSeries_AI_Config_Channel_Type_Ground_Ref_Bits;
+			break;
+		case AREF_OTHER:
+			break;
+		}
+		config_bits |= MSeries_AI_Config_Channel_Bits(chan);
+		config_bits |=
+			MSeries_AI_Config_Bank_Bits(boardtype.reg_type, chan);
+		config_bits |= MSeries_AI_Config_Gain_Bits(range_code);
+		if (i == n_chan - 1)
+			config_bits |= MSeries_AI_Config_Last_Channel_Bit;
+		if (dither)
+			config_bits |= MSeries_AI_Config_Dither_Bit;
+		/* don't use 2's complement encoding */
+		config_bits |= MSeries_AI_Config_Polarity_Bit;
+		ni_writew(config_bits, M_Offset_AI_Config_FIFO_Data);
+	}
+	ni_prime_channelgain_list(dev);
+}
+
+/*
+ * Notes on the 6110 and 6111:
+ * These boards are slightly different from the rest of the series, since
+ * they have multiple A/D converters.
+ * From the driver's point of view, the configuration memory layout is a
+ * little different.
+ * Configuration Memory Low:
+ *   bits 15-9: same
+ *   bit 8: unipolar/bipolar (should be 0 for bipolar)
+ *   bits 0-3: gain.  This is 4 bits instead of 3 for the other boards
+ *       1001 gain=0.1 (+/- 50)
+ *       1010 0.2
+ *       1011 0.1
+ *       0001 1
+ *       0010 2
+ *       0011 5
+ *       0100 10
+ *       0101 20
+ *       0110 50
+ * Configuration Memory High:
+ *   bits 12-14: Channel Type
+ *       001 for differential
+ *       000 for calibration
+ *   bit 11: coupling  (this is not currently handled)
+ *       1 AC coupling
+ *       0 DC coupling
+ *   bits 0-2: channel
+ *       valid channels are 0-3
+ */
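+/*
+ * As a concrete (hypothetical) example of the layout above: selecting
+ * channel 2 in differential mode as the last entry of the list would
+ * make the loop in ni_load_channelgain_list() write
+ * hi = AI_DIFFERENTIAL | AI_CONFIG_CHANNEL(2) to Configuration_Memory_High
+ * and lo = <range code> | AI_LAST_CHANNEL to Configuration_Memory_Low.
+ */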
+static void ni_load_channelgain_list(struct a4l_device *dev,
+				     unsigned int n_chan, unsigned int *list)
+{
+	unsigned int chan, range, aref;
+	unsigned int i;
+	unsigned int hi, lo;
+	unsigned offset;
+	unsigned int dither;
+
+	if (boardtype.reg_type & ni_reg_m_series_mask) {
+		ni_m_series_load_channelgain_list(dev, n_chan, list);
+		return;
+	}
+	if (n_chan == 1 && (boardtype.reg_type != ni_reg_611x)
+	    && (boardtype.reg_type != ni_reg_6143)) {
+		if (devpriv->changain_state
+		    && devpriv->changain_spec == list[0]) {
+			/* ready to go. */
+			return;
+		}
+		devpriv->changain_state = 1;
+		devpriv->changain_spec = list[0];
+	} else {
+		devpriv->changain_state = 0;
+	}
+
+	devpriv->stc_writew(dev, 1, Configuration_Memory_Clear);
+
+	/* Set up Calibration mode if required */
+	if (boardtype.reg_type == ni_reg_6143) {
+		if ((list[0] & CR_ALT_SOURCE)
+		    && !devpriv->ai_calib_source_enabled) {
+			/* Strobe Relay enable bit */
+			ni_writew(devpriv->ai_calib_source |
+				  Calibration_Channel_6143_RelayOn,
+				  Calibration_Channel_6143);
+			ni_writew(devpriv->ai_calib_source,
+				  Calibration_Channel_6143);
+			devpriv->ai_calib_source_enabled = 1;
+			/* Allow relays to change */
+			if (rtdm_in_rt_context())
+				rtdm_task_sleep(100 * 1000000);
+			else
+				msleep_interruptible(100);
+		} else if (!(list[0] & CR_ALT_SOURCE)
+			   && devpriv->ai_calib_source_enabled) {
+			/* Strobe Relay disable bit */
+			ni_writew(devpriv->ai_calib_source |
+				  Calibration_Channel_6143_RelayOff,
+				  Calibration_Channel_6143);
+			ni_writew(devpriv->ai_calib_source,
+				  Calibration_Channel_6143);
+			devpriv->ai_calib_source_enabled = 0;
+			/* Allow relays to change */
+			if (rtdm_in_rt_context())
+				rtdm_task_sleep(100 * 1000000);
+			else
+				msleep_interruptible(100);
+		}
+	}
+
+	offset = 1 << (boardtype.adbits - 1);
+	for (i = 0; i < n_chan; i++) {
+		if ((boardtype.reg_type != ni_reg_6143)
+		    && (list[i] & CR_ALT_SOURCE)) {
+			chan = devpriv->ai_calib_source;
+		} else {
+			chan = CR_CHAN(list[i]);
+		}
+		aref = CR_AREF(list[i]);
+		range = CR_RNG(list[i]);
+		dither = ((list[i] & CR_ALT_FILTER) != 0);
+
+		/* fix the external/internal range differences */
+		range = ni_gainlkup[boardtype.gainlkup][range];
+		if (boardtype.reg_type == ni_reg_611x)
+			devpriv->ai_offset[i] = offset;
+		else
+			devpriv->ai_offset[i] = (range & 0x100) ? 0 : offset;
+
+		hi = 0;
+		if ((list[i] & CR_ALT_SOURCE)) {
+			if (boardtype.reg_type == ni_reg_611x)
+				ni_writew(CR_CHAN(list[i]) & 0x0003,
+					  Calibration_Channel_Select_611x);
+		} else {
+			if (boardtype.reg_type == ni_reg_611x)
+				aref = AREF_DIFF;
+			else if (boardtype.reg_type == ni_reg_6143)
+				aref = AREF_OTHER;
+			switch (aref) {
+			case AREF_DIFF:
+				hi |= AI_DIFFERENTIAL;
+				break;
+			case AREF_COMMON:
+				hi |= AI_COMMON;
+				break;
+			case AREF_GROUND:
+				hi |= AI_GROUND;
+				break;
+			case AREF_OTHER:
+				break;
+			}
+		}
+		hi |= AI_CONFIG_CHANNEL(chan);
+
+		ni_writew(hi, Configuration_Memory_High);
+
+		if (boardtype.reg_type != ni_reg_6143) {
+			lo = range;
+			if (i == n_chan - 1)
+				lo |= AI_LAST_CHANNEL;
+			if (dither)
+				lo |= AI_DITHER;
+
+			ni_writew(lo, Configuration_Memory_Low);
+		}
+	}
+
+	/* prime the channel/gain list */
+	if ((boardtype.reg_type != ni_reg_611x)
+	    && (boardtype.reg_type != ni_reg_6143)) {
+		ni_prime_channelgain_list(dev);
+	}
+}
+
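+/*
+ * Timer helpers: ni_ns_to_timer() converts a period in nanoseconds into
+ * a divider value for the board timebase (devpriv->clock_ns per tick,
+ * minus one as the hardware counts from the loaded value), and
+ * ni_timer_to_ns() is the inverse.  For instance, assuming a 50 ns
+ * timebase, a requested 10 us scan period rounds to a divider of 199
+ * and maps back to exactly 10000 ns.
+ */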
+static int ni_ns_to_timer(const struct a4l_device *dev,
+			  unsigned int nanosec, int round_mode)
+{
+	int divider;
+	switch (round_mode) {
+	case TRIG_ROUND_NEAREST:
+	default:
+		divider = (nanosec + devpriv->clock_ns / 2) / devpriv->clock_ns;
+		break;
+	case TRIG_ROUND_DOWN:
+		divider = (nanosec) / devpriv->clock_ns;
+		break;
+	case TRIG_ROUND_UP:
+		divider = (nanosec + devpriv->clock_ns - 1) / devpriv->clock_ns;
+		break;
+	}
+	return divider - 1;
+}
+
+static unsigned int ni_timer_to_ns(const struct a4l_device *dev, int timer)
+{
+	return devpriv->clock_ns * (timer + 1);
+}
+
+static unsigned int ni_min_ai_scan_period_ns(struct a4l_device *dev,
+					     unsigned int num_channels)
+{
+	switch (boardtype.reg_type) {
+	case ni_reg_611x:
+	case ni_reg_6143:
+		/* simultaneously-sampled inputs */
+		return boardtype.ai_speed;
+	default:
+		/* multiplexed inputs */
+		break;
+	}
+	return boardtype.ai_speed * num_channels;
+}
+
+static struct a4l_cmd_desc mio_ai_cmd_mask = {
+	.idx_subd = 0,
+	.start_src = TRIG_NOW | TRIG_INT | TRIG_EXT,
+	.scan_begin_src = TRIG_TIMER | TRIG_EXT,
+	.convert_src = TRIG_TIMER | TRIG_EXT | TRIG_NOW,
+	.scan_end_src = TRIG_COUNT,
+	.stop_src = TRIG_COUNT | TRIG_NONE,
+};
+
+int ni_ai_inttrig(struct a4l_subdevice *subd, lsampl_t trignum)
+{
+	struct a4l_device *dev = subd->dev;
+
+	if (trignum != 0)
+		return -EINVAL;
+
+	devpriv->stc_writew(dev, AI_START1_Pulse | devpriv->ai_cmd2,
+			    AI_Command_2_Register);
+
+	return 1;
+}
+
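+/*
+ * Command checking helpers, modelled on the Comedi cfc_* checkers: each
+ * one validates (and, where needed, clamps) a single field of the
+ * command descriptor, logs the offending source line for debugging and
+ * returns -EINVAL, so the callers in ni_ai_cmdtest() can OR the results
+ * together.
+ */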
+#define cfc_check_trigger_arg_is(a,b) __cfc_check_trigger_arg_is(a,b, dev, __LINE__)
+static inline int __cfc_check_trigger_arg_is(unsigned int *arg,
+	                                     unsigned int val,
+					     struct a4l_device *dev,
+	                                     unsigned int line)
+{
+	if (*arg != val) {
+		a4l_dbg(1, drv_dbg, dev, "line %d: *arg (%d) != val (%d) \n",
+			line, *arg, val);
+		*arg = val;
+		return -EINVAL;
+	}
+	return 0;
+}
+
+#define cfc_check_trigger_is_unique(a) __cfc_check_trigger_is_unique(a, dev, __LINE__)
+static inline int __cfc_check_trigger_is_unique(unsigned int src,
+					        struct a4l_device *dev,
+	                                        unsigned int line)
+{
+	/* this test is true if more than one _src bit is set */
+	if ((src & (src - 1)) != 0) {
+		a4l_dbg(1, drv_dbg, dev, "line %d: src (%d) \n", line, src);
+		return -EINVAL;
+	}
+	return 0;
+}
+
+#define cfc_check_trigger_src(a,b) __cfc_check_trigger_src(a,b, dev, __LINE__)
+static inline int __cfc_check_trigger_src(unsigned int *src,
+	                                  unsigned int flags,
+					  struct a4l_device *dev,
+	                                  unsigned int line)
+{
+	unsigned int orig_src = *src;
+
+	*src = orig_src & flags;
+	if (*src == 0 || *src != orig_src){
+		a4l_dbg(1, drv_dbg, dev, "line %d: *src (%d)  orig_src (%d) flags(%d) \n",
+			line, *src, orig_src, flags);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+#define cfc_check_trigger_arg_min(a,b) __cfc_check_trigger_arg_min(a,b, dev, __LINE__)
+static inline int __cfc_check_trigger_arg_min(unsigned int *arg,
+					      unsigned int val,
+					      struct a4l_device *dev,
+	                                      unsigned int line)
+{
+	if (*arg < val) {
+		a4l_dbg(1, drv_dbg, dev, "line %d: *arg (%d) < val (%d) \n",
+			line, *arg, val);
+		*arg = val;
+		return -EINVAL;
+	}
+	return 0;
+}
+
+#define cfc_check_trigger_arg_max(a,b) __cfc_check_trigger_arg_max(a,b, dev, __LINE__)
+static inline int __cfc_check_trigger_arg_max(unsigned int *arg,
+					      unsigned int val,
+					      struct a4l_device *dev,
+	                                      unsigned int line)
+{
+	if (*arg > val) {
+		a4l_dbg(1, drv_dbg, dev, "line %d: *arg (%d) > val (%d) \n",
+			line, *arg, val);
+		*arg = val;
+		return -EINVAL;
+	}
+	return 0;
+}
+
+static int ni_ai_cmdtest(struct a4l_subdevice *subd, struct a4l_cmd_desc *cmd)
+{
+	struct a4l_device *dev = subd->dev;
+	unsigned int sources;
+	int tmp, err = 0;
+
+	/* Step 1 : check if triggers are trivially valid */
+	err |= cfc_check_trigger_src(&cmd->start_src, TRIG_NOW | TRIG_INT | TRIG_EXT);
+	err |= cfc_check_trigger_src(&cmd->scan_begin_src, TRIG_TIMER | TRIG_EXT);
+
+	sources = TRIG_TIMER | TRIG_EXT;
+	if (boardtype.reg_type == ni_reg_611x || boardtype.reg_type == ni_reg_6143)
+		sources |= TRIG_NOW;
+
+	err |= cfc_check_trigger_src(&cmd->convert_src, sources);
+	err |= cfc_check_trigger_src(&cmd->scan_end_src, TRIG_COUNT);
+	err |= cfc_check_trigger_src(&cmd->stop_src, TRIG_COUNT | TRIG_NONE);
+
+	if (err) {
+		if (cmd->valid_simul_stages & BIT(1))
+			return 0;
+
+		a4l_dbg(1, drv_dbg, dev, "ai_cmdtest ERR 1 \n");
+		return -EINVAL;
+	}
+
+	/* Step 2a : make sure trigger sources are unique */
+	err |= cfc_check_trigger_is_unique(cmd->start_src);
+	err |= cfc_check_trigger_is_unique(cmd->scan_begin_src);
+	err |= cfc_check_trigger_is_unique(cmd->convert_src);
+	err |= cfc_check_trigger_is_unique(cmd->stop_src);
+
+	/* Step 2b : and mutually compatible */
+
+	if (err) {
+		if (cmd->valid_simul_stages & BIT(2))
+			return 0;
+
+		a4l_dbg(1, drv_dbg, dev, "ai_cmdtest ERR 2 \n");
+		return -EINVAL;
+	}
+
+	/* Step 3: check if arguments are trivially valid */
+
+	if (cmd->start_src == TRIG_EXT) {
+		/* external trigger */
+		unsigned int tmp = CR_CHAN(cmd->start_arg);
+		if (tmp > 16)
+			tmp = 16;
+		tmp |= (cmd->start_arg & (CR_INVERT | CR_EDGE));
+		err |= cfc_check_trigger_arg_is(&cmd->start_arg, tmp);
+
+	} else {
+		/* true for both TRIG_NOW and TRIG_INT */
+		err |= cfc_check_trigger_arg_is(&cmd->start_arg, 0);
+	}
+
+	if (cmd->scan_begin_src == TRIG_TIMER) {
+		err |= cfc_check_trigger_arg_min(&cmd->scan_begin_arg,
+			ni_min_ai_scan_period_ns(dev, cmd->nb_chan));
+
+		err |= cfc_check_trigger_arg_max(&cmd->scan_begin_arg,
+						 devpriv->clock_ns * 0xffffff);
+	} else if (cmd->scan_begin_src == TRIG_EXT) {
+		/* external trigger */
+		unsigned int tmp = CR_CHAN(cmd->scan_begin_arg);
+
+		if (tmp > 16)
+			tmp = 16;
+		tmp |= (cmd->scan_begin_arg & (CR_INVERT | CR_EDGE));
+		err |= cfc_check_trigger_arg_is(&cmd->scan_begin_arg, tmp);
+
+	} else {		/* TRIG_OTHER */
+		err |= cfc_check_trigger_arg_is(&cmd->scan_begin_arg, 0);
+
+	}
+
+	if (cmd->convert_src == TRIG_TIMER) {
+		if ((boardtype.reg_type == ni_reg_611x)
+		    || (boardtype.reg_type == ni_reg_6143)) {
+			err |= cfc_check_trigger_arg_is(&cmd->convert_arg, 0);
+
+		} else {
+			err |= cfc_check_trigger_arg_min(&cmd->convert_arg,
+							 boardtype.ai_speed);
+			err |= cfc_check_trigger_arg_max(&cmd->convert_arg,
+						devpriv->clock_ns * 0xffff);
+		}
+	} else if (cmd->convert_src == TRIG_EXT) {
+		/* external trigger */
+		unsigned int tmp = CR_CHAN(cmd->convert_arg);
+
+		if (tmp > 16)
+			tmp = 16;
+		tmp |= (cmd->convert_arg & (CR_ALT_FILTER | CR_INVERT));
+		err |= cfc_check_trigger_arg_is(&cmd->convert_arg, tmp);
+	} else if (cmd->convert_src == TRIG_NOW) {
+		err |= cfc_check_trigger_arg_is(&cmd->convert_arg, 0);
+	}
+
+	err |= cfc_check_trigger_arg_is(&cmd->scan_end_arg, cmd->nb_chan);
+
+	if (cmd->stop_src == TRIG_COUNT) {
+		unsigned int max_count = 0x01000000;
+
+		if (boardtype.reg_type == ni_reg_611x)
+			max_count -= num_adc_stages_611x;
+		err |= cfc_check_trigger_arg_max(&cmd->stop_arg, max_count);
+		err |= cfc_check_trigger_arg_min(&cmd->stop_arg, 1);
+
+	} else {
+		/* TRIG_NONE */
+		err |= cfc_check_trigger_arg_is(&cmd->stop_arg, 0);
+	}
+
+	if (err) {
+		if (cmd->valid_simul_stages & BIT(3))
+			return 0;
+
+		a4l_dbg(1, drv_dbg, dev, "ai_cmdtest ERR 3 \n");
+		return -EINVAL;
+	}
+
+	/* step 4: fix up any arguments */
+	if (cmd->scan_begin_src == TRIG_TIMER) {
+		tmp = cmd->scan_begin_arg;
+		cmd->scan_begin_arg =
+		    ni_timer_to_ns(dev, ni_ns_to_timer(dev,
+						       cmd->scan_begin_arg,
+						       cmd->flags &
+						       TRIG_ROUND_MASK));
+		if (tmp != cmd->scan_begin_arg)
+			err++;
+	}
+	if (cmd->convert_src == TRIG_TIMER) {
+		if ((boardtype.reg_type != ni_reg_611x)
+		    && (boardtype.reg_type != ni_reg_6143)) {
+			tmp = cmd->convert_arg;
+			cmd->convert_arg =
+			    ni_timer_to_ns(dev, ni_ns_to_timer(dev,
+							       cmd->convert_arg,
+							       cmd->flags &
+							       TRIG_ROUND_MASK));
+			if (tmp != cmd->convert_arg)
+				err++;
+			if (cmd->scan_begin_src == TRIG_TIMER &&
+			    cmd->scan_begin_arg <
+			    cmd->convert_arg * cmd->scan_end_arg) {
+				cmd->scan_begin_arg =
+				    cmd->convert_arg * cmd->scan_end_arg;
+				err++;
+			}
+		}
+	}
+
+	if (err) {
+		if (cmd->valid_simul_stages & BIT(4))
+			return 0;
+
+		a4l_dbg(1, drv_dbg, dev, "ai_cmdtest ERR 4 \n");
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int ni_ai_cmd(struct a4l_subdevice *subd, struct a4l_cmd_desc *cmd)
+{
+	struct a4l_device *dev = subd->dev;
+	int timer;
+	int mode1 = 0;		/* mode1 is needed for both stop and convert */
+	int mode2 = 0;
+	int start_stop_select = 0;
+	unsigned int stop_count;
+	int interrupt_a_enable = 0;
+
+	a4l_info(dev, "start\n");
+
+	if (a4l_get_irq(dev) == A4L_IRQ_UNUSED) {
+		a4l_err(dev, "ni_ai_cmd: cannot run command without an irq");
+		return -EIO;
+	}
+	ni_clear_ai_fifo(dev);
+
+	ni_load_channelgain_list(dev, cmd->nb_chan, cmd->chan_descs);
+
+	/* start configuration */
+	devpriv->stc_writew(dev, AI_Configuration_Start, Joint_Reset_Register);
+
+	/* disable analog triggering for now, since it
+	 * interferes with the use of pfi0 */
+	devpriv->an_trig_etc_reg &= ~Analog_Trigger_Enable;
+	devpriv->stc_writew(dev, devpriv->an_trig_etc_reg,
+			    Analog_Trigger_Etc_Register);
+
+	switch (cmd->start_src) {
+	case TRIG_INT:
+	case TRIG_NOW:
+		devpriv->stc_writew(dev, AI_START2_Select(0) |
+				    AI_START1_Sync | AI_START1_Edge | AI_START1_Select(0),
+				    AI_Trigger_Select_Register);
+		break;
+	case TRIG_EXT:
+	{
+		int chan = CR_CHAN(cmd->start_arg);
+		unsigned int bits = AI_START2_Select(0) |
+			AI_START1_Sync | AI_START1_Select(chan + 1);
+
+		if (cmd->start_arg & CR_INVERT)
+			bits |= AI_START1_Polarity;
+		if (cmd->start_arg & CR_EDGE)
+			bits |= AI_START1_Edge;
+		devpriv->stc_writew(dev, bits,
+				    AI_Trigger_Select_Register);
+		break;
+	}
+	}
+
+	mode2 &= ~AI_Pre_Trigger;
+	mode2 &= ~AI_SC_Initial_Load_Source;
+	mode2 &= ~AI_SC_Reload_Mode;
+	devpriv->stc_writew(dev, mode2, AI_Mode_2_Register);
+
+	if (cmd->nb_chan == 1 || (boardtype.reg_type == ni_reg_611x)
+	    || (boardtype.reg_type == ni_reg_6143)) {
+		start_stop_select |= AI_STOP_Polarity;
+		start_stop_select |= AI_STOP_Select(31);/* logic low */
+		start_stop_select |= AI_STOP_Sync;
+	} else {
+		start_stop_select |= AI_STOP_Select(19);/* ai configuration memory */
+	}
+	devpriv->stc_writew(dev, start_stop_select,
+			    AI_START_STOP_Select_Register);
+
+	devpriv->ai_cmd2 = 0;
+	switch (cmd->stop_src) {
+	case TRIG_COUNT:
+		stop_count = cmd->stop_arg - 1;
+
+		if (boardtype.reg_type == ni_reg_611x) {
+			/* have to take 3 stage adc pipeline into account */
+			stop_count += num_adc_stages_611x;
+		}
+		/* stage number of scans */
+		devpriv->stc_writel(dev, stop_count, AI_SC_Load_A_Registers);
+
+		mode1 |= AI_Start_Stop | AI_Mode_1_Reserved | AI_Trigger_Once;
+		devpriv->stc_writew(dev, mode1, AI_Mode_1_Register);
+		/* load SC (Scan Count) */
+		devpriv->stc_writew(dev, AI_SC_Load, AI_Command_1_Register);
+
+		devpriv->ai_continuous = 0;
+		if (stop_count == 0) {
+			devpriv->ai_cmd2 |= AI_End_On_End_Of_Scan;
+			interrupt_a_enable |= AI_STOP_Interrupt_Enable;
+			/* this is required to get the last sample
+			   for nb_chan > 1, not sure why */
+			if (cmd->nb_chan > 1)
+				start_stop_select |=
+					AI_STOP_Polarity | AI_STOP_Edge;
+		}
+		break;
+	case TRIG_NONE:
+		/* stage number of scans */
+		devpriv->stc_writel(dev, 0, AI_SC_Load_A_Registers);
+
+		mode1 |= AI_Start_Stop | AI_Mode_1_Reserved | AI_Continuous;
+		devpriv->stc_writew(dev, mode1, AI_Mode_1_Register);
+
+		/* load SC (Scan Count) */
+		devpriv->stc_writew(dev, AI_SC_Load, AI_Command_1_Register);
+
+		devpriv->ai_continuous = 1;
+
+		break;
+	}
+
+	switch (cmd->scan_begin_src) {
+	case TRIG_TIMER:
+		/*
+		  stop bits for non 611x boards
+		  AI_SI_Special_Trigger_Delay=0
+		  AI_Pre_Trigger=0
+		  AI_START_STOP_Select_Register:
+		  AI_START_Polarity=0 (?)      rising edge
+		  AI_START_Edge=1              edge triggered
+		  AI_START_Sync=1 (?)
+		  AI_START_Select=0            SI_TC
+		  AI_STOP_Polarity=0           rising edge
+		  AI_STOP_Edge=0               level
+		  AI_STOP_Sync=1
+		  AI_STOP_Select=19            external pin (configuration mem)
+		*/
+		start_stop_select |= AI_START_Edge | AI_START_Sync;
+		devpriv->stc_writew(dev, start_stop_select,
+				    AI_START_STOP_Select_Register);
+
+		mode2 |= AI_SI_Reload_Mode(0);
+		/* AI_SI_Initial_Load_Source=A */
+		mode2 &= ~AI_SI_Initial_Load_Source;
+
+		devpriv->stc_writew(dev, mode2, AI_Mode_2_Register);
+
+		/* load SI */
+		timer = ni_ns_to_timer(dev, cmd->scan_begin_arg,
+				       TRIG_ROUND_NEAREST);
+		devpriv->stc_writel(dev, timer, AI_SI_Load_A_Registers);
+		devpriv->stc_writew(dev, AI_SI_Load, AI_Command_1_Register);
+		break;
+	case TRIG_EXT:
+		if (cmd->scan_begin_arg & CR_EDGE)
+			start_stop_select |= AI_START_Edge;
+		/* AI_START_Polarity==1 is falling edge */
+		if (cmd->scan_begin_arg & CR_INVERT)
+			start_stop_select |= AI_START_Polarity;
+		if (cmd->scan_begin_src != cmd->convert_src ||
+		    (cmd->scan_begin_arg & ~CR_EDGE) !=
+		    (cmd->convert_arg & ~CR_EDGE))
+			start_stop_select |= AI_START_Sync;
+		start_stop_select |=
+			AI_START_Select(1 + CR_CHAN(cmd->scan_begin_arg));
+		devpriv->stc_writew(dev, start_stop_select,
+				    AI_START_STOP_Select_Register);
+		break;
+	}
+
+	switch (cmd->convert_src) {
+	case TRIG_TIMER:
+	case TRIG_NOW:
+		if (cmd->convert_arg == 0 || cmd->convert_src == TRIG_NOW)
+			timer = 1;
+		else
+			timer = ni_ns_to_timer(dev, cmd->convert_arg,
+					       TRIG_ROUND_NEAREST);
+		devpriv->stc_writew(dev, 1, AI_SI2_Load_A_Register);	/* 0,0 does not work. */
+		devpriv->stc_writew(dev, timer, AI_SI2_Load_B_Register);
+
+		/* AI_SI2_Reload_Mode = alternate */
+		/* AI_SI2_Initial_Load_Source = A */
+		mode2 &= ~AI_SI2_Initial_Load_Source;
+		mode2 |= AI_SI2_Reload_Mode;
+		devpriv->stc_writew(dev, mode2, AI_Mode_2_Register);
+
+		/* AI_SI2_Load */
+		devpriv->stc_writew(dev, AI_SI2_Load, AI_Command_1_Register);
+
+		mode2 |= AI_SI2_Reload_Mode; /* alternate */
+		mode2 |= AI_SI2_Initial_Load_Source; /* B */
+
+		devpriv->stc_writew(dev, mode2, AI_Mode_2_Register);
+		break;
+	case TRIG_EXT:
+		mode1 |= AI_CONVERT_Source_Select(1 + cmd->convert_arg);
+		if ((cmd->convert_arg & CR_INVERT) == 0)
+			mode1 |= AI_CONVERT_Source_Polarity;
+		devpriv->stc_writew(dev, mode1, AI_Mode_1_Register);
+
+		mode2 |= AI_Start_Stop_Gate_Enable | AI_SC_Gate_Enable;
+		devpriv->stc_writew(dev, mode2, AI_Mode_2_Register);
+
+		break;
+	}
+
+	if (a4l_get_irq(dev) != A4L_IRQ_UNUSED) {
+
+		/* interrupt on FIFO, errors, SC_TC */
+		interrupt_a_enable |= AI_Error_Interrupt_Enable |
+			AI_SC_TC_Interrupt_Enable;
+
+#if (!defined(CONFIG_XENO_DRIVERS_ANALOGY_NI_MITE) && \
+     !defined(CONFIG_XENO_DRIVERS_ANALOGY_NI_MITE_MODULE))
+		interrupt_a_enable |= AI_FIFO_Interrupt_Enable;
+#endif /* CONFIG_XENO_DRIVERS_ANALOGY_NI_MITE */
+
+		if (cmd->flags & TRIG_WAKE_EOS
+		    || (devpriv->ai_cmd2 & AI_End_On_End_Of_Scan)) {
+			/* wake on end-of-scan */
+			devpriv->aimode = AIMODE_SCAN;
+		} else {
+			devpriv->aimode = AIMODE_HALF_FULL;
+		}
+
+		switch (devpriv->aimode) {
+		case AIMODE_HALF_FULL:
+			/* generate FIFO interrupts and DMA requests on half-full */
+#if (defined(CONFIG_XENO_DRIVERS_ANALOGY_NI_MITE) || \
+     defined(CONFIG_XENO_DRIVERS_ANALOGY_NI_MITE_MODULE))
+			devpriv->stc_writew(dev, AI_FIFO_Mode_HF_to_E,
+					    AI_Mode_3_Register);
+#else /* !CONFIG_XENO_DRIVERS_ANALOGY_NI_MITE */
+			devpriv->stc_writew(dev, AI_FIFO_Mode_HF,
+					    AI_Mode_3_Register);
+#endif /* CONFIG_XENO_DRIVERS_ANALOGY_NI_MITE */
+			break;
+		case AIMODE_SAMPLE:
+			/* generate FIFO interrupts on non-empty */
+			devpriv->stc_writew(dev, AI_FIFO_Mode_NE,
+					    AI_Mode_3_Register);
+			break;
+		case AIMODE_SCAN:
+#if (defined(CONFIG_XENO_DRIVERS_ANALOGY_NI_MITE) || \
+     defined(CONFIG_XENO_DRIVERS_ANALOGY_NI_MITE_MODULE))
+			devpriv->stc_writew(dev, AI_FIFO_Mode_NE,
+					    AI_Mode_3_Register);
+#else /* !CONFIG_XENO_DRIVERS_ANALOGY_NI_MITE */
+			devpriv->stc_writew(dev, AI_FIFO_Mode_HF,
+					    AI_Mode_3_Register);
+#endif /* CONFIG_XENO_DRIVERS_ANALOGY_NI_MITE */
+			interrupt_a_enable |= AI_STOP_Interrupt_Enable;
+			break;
+		default:
+			break;
+		}
+
+		/* Clear interrupts */
+		devpriv->stc_writew(dev,
+				    AI_Error_Interrupt_Ack | AI_STOP_Interrupt_Ack |
+				    AI_START_Interrupt_Ack | AI_START2_Interrupt_Ack |
+				    AI_START1_Interrupt_Ack | AI_SC_TC_Interrupt_Ack |
+				    AI_SC_TC_Error_Confirm, Interrupt_A_Ack_Register);
+
+		ni_set_bits(dev, Interrupt_A_Enable_Register,
+			    interrupt_a_enable, 1);
+
+		a4l_info(dev, "Interrupt_A_Enable_Register = 0x%04x\n",
+			 devpriv->int_a_enable_reg);
+	} else {
+		/* interrupt on nothing */
+		ni_set_bits(dev, Interrupt_A_Enable_Register, ~0, 0);
+
+		/* XXX start polling if necessary */
+		a4l_warn(dev, "ni_ai_cmd: interrupting on nothing\n");
+	}
+
+	/* end configuration */
+	devpriv->stc_writew(dev, AI_Configuration_End, Joint_Reset_Register);
+
+	switch (cmd->scan_begin_src) {
+	case TRIG_TIMER:
+		devpriv->stc_writew(dev,
+				    AI_SI2_Arm | AI_SI_Arm | AI_DIV_Arm | AI_SC_Arm,
+				    AI_Command_1_Register);
+		break;
+	case TRIG_EXT:
+		/* XXX AI_SI_Arm? */
+		devpriv->stc_writew(dev,
+				    AI_SI2_Arm | AI_SI_Arm | AI_DIV_Arm | AI_SC_Arm,
+				    AI_Command_1_Register);
+		break;
+	}
+
+#if (defined(CONFIG_XENO_DRIVERS_ANALOGY_NI_MITE) || \
+     defined(CONFIG_XENO_DRIVERS_ANALOGY_NI_MITE_MODULE))
+	{
+		int retval = ni_ai_setup_MITE_dma(subd);
+		if (retval)
+			return retval;
+	}
+
+#endif /* CONFIG_XENO_DRIVERS_ANALOGY_NI_MITE */
+
+	switch (cmd->start_src) {
+	case TRIG_NOW:
+		/* AI_START1_Pulse */
+		devpriv->stc_writew(dev, AI_START1_Pulse | devpriv->ai_cmd2,
+				    AI_Command_2_Register);
+		break;
+	case TRIG_EXT:
+		/* TODO: set trigger callback field to NULL */
+		break;
+	case TRIG_INT:
+		/* TODO: set trigger callback field to ni_ai_inttrig */
+		break;
+	}
+
+	a4l_info(dev, "exit\n");
+
+	return 0;
+}
+
+int ni_ai_config_analog_trig(struct a4l_subdevice *subd, struct a4l_kernel_instruction *insn)
+{
+	struct a4l_device *dev = subd->dev;
+	unsigned int a, b, modebits;
+	int err = 0;
+	uint32_t *data = (uint32_t *)insn->data;
+
+	/* data[1] is flags
+	 * data[2] is analog line
+	 * data[3] is set level
+	 * data[4] is reset level */
+	if (!boardtype.has_analog_trig)
+		return -EINVAL;
+
+	if ((data[1] & 0xffff0000) != A4L_EV_SCAN_BEGIN) {
+		data[1] &= (A4L_EV_SCAN_BEGIN | 0xffff);
+		err++;
+	}
+	if (data[2] >= boardtype.n_adchan) {
+		data[2] = boardtype.n_adchan - 1;
+		err++;
+	}
+	if (data[3] > 255) {	/* a */
+		data[3] = 255;
+		err++;
+	}
+	if (data[4] > 255) {	/* b */
+		data[4] = 255;
+		err++;
+	}
+	/*
+	 * 00 ignore
+	 * 01 set
+	 * 10 reset
+	 *
+	 * modes:
+	 *   1 level:                    +b-   +a-
+	 *     high mode                00 00 01 10
+	 *     low mode                 00 00 10 01
+	 *   2 level: (a<b)
+	 *     hysteresis low mode      10 00 00 01
+	 *     hysteresis high mode     01 00 00 10
+	 *     middle mode              10 01 01 10
+	 */
+
+	a = data[3];
+	b = data[4];
+	modebits = data[1] & 0xff;
+	if (modebits & 0xf0) {
+		/* two level mode */
+		if (b < a) {
+			/* swap order */
+			a = data[4];
+			b = data[3];
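+			/* mirror the set/reset nibbles so they track the
+			   swapped thresholds */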
+			modebits = ((data[1] & 0xf) << 4) |
+				((data[1] & 0xf0) >> 4);
+		}
+		devpriv->atrig_low = a;
+		devpriv->atrig_high = b;
+		switch (modebits) {
+		case 0x81:	/* low hysteresis mode */
+			devpriv->atrig_mode = 6;
+			break;
+		case 0x42:	/* high hysteresis mode */
+			devpriv->atrig_mode = 3;
+			break;
+		case 0x96:	/* middle window mode */
+			devpriv->atrig_mode = 2;
+			break;
+		default:
+			data[1] &= ~0xff;
+			err++;
+		}
+	} else {
+		/* one level mode */
+		if (b != 0) {
+			data[4] = 0;
+			err++;
+		}
+		switch (modebits) {
+		case 0x06:	/* high window mode */
+			devpriv->atrig_high = a;
+			devpriv->atrig_mode = 0;
+			break;
+		case 0x09:	/* low window mode */
+			devpriv->atrig_low = a;
+			devpriv->atrig_mode = 1;
+			break;
+		default:
+			data[1] &= ~0xff;
+			err++;
+		}
+	}
+
+	if (err)
+		return -EAGAIN;
+
+	return 0;
+}
+
+int ni_ai_insn_config(struct a4l_subdevice *subd, struct a4l_kernel_instruction *insn)
+{
+	struct a4l_device *dev = subd->dev;
+	unsigned int *data = (unsigned int *)insn->data;
+
+	if (insn->data_size < sizeof(unsigned int))
+		return -EINVAL;
+
+	switch (data[0]) {
+	case A4L_INSN_CONFIG_ANALOG_TRIG:
+		return ni_ai_config_analog_trig(subd, insn);
+	case A4L_INSN_CONFIG_ALT_SOURCE:
+		if (boardtype.reg_type & ni_reg_m_series_mask) {
+			if (data[1] & ~(MSeries_AI_Bypass_Cal_Sel_Pos_Mask |
+					MSeries_AI_Bypass_Cal_Sel_Neg_Mask |
+					MSeries_AI_Bypass_Mode_Mux_Mask |
+					MSeries_AO_Bypass_AO_Cal_Sel_Mask)) {
+				return -EINVAL;
+			}
+			devpriv->ai_calib_source = data[1];
+		} else if (boardtype.reg_type == ni_reg_6143) {
+			unsigned int calib_source;
+
+			calib_source = data[1] & 0xf;
+
+			if (calib_source > 0xF)
+				return -EINVAL;
+
+			devpriv->ai_calib_source = calib_source;
+			ni_writew(calib_source, Calibration_Channel_6143);
+		} else {
+			unsigned int calib_source;
+			unsigned int calib_source_adjust;
+
+			calib_source = data[1] & 0xf;
+			calib_source_adjust = (data[1] >> 4) & 0xff;
+
+			if (calib_source >= 8)
+				return -EINVAL;
+			devpriv->ai_calib_source = calib_source;
+			if (boardtype.reg_type == ni_reg_611x) {
+				ni_writeb(calib_source_adjust,
+					  Cal_Gain_Select_611x);
+			}
+		}
+		return 0;
+	default:
+		break;
+	}
+
+	return -EINVAL;
+}
+
+/* munge data from unsigned to 2's complement for analog output bipolar modes */
+static void ni_ao_munge(struct a4l_subdevice *subd, void *buf, unsigned long size)
+{
+	struct a4l_device *dev = subd->dev;
+	struct a4l_cmd_desc *cmd = a4l_get_cmd(subd);
+	int chan_idx = a4l_get_chan(subd);
+	uint16_t *array = buf;
+	unsigned int i, range, offset;
+
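+	/* mid-scale value: subtracting it turns offset-binary samples into
+	   2's complement */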
+	offset = 1 << (boardtype.aobits - 1);
+	for (i = 0; i < size / sizeof(uint16_t); i++) {
+
+		range = CR_RNG(cmd->chan_descs[chan_idx]);
+		if (boardtype.ao_unipolar == 0 || (range & 1) == 0)
+			array[i] -= offset;
+
+#if (defined(CONFIG_XENO_DRIVERS_ANALOGY_NI_MITE) || \
+     defined(CONFIG_XENO_DRIVERS_ANALOGY_NI_MITE_MODULE))
+		array[i] = cpu_to_le16(array[i]);
+#endif /* CONFIG_XENO_DRIVERS_ANALOGY_NI_MITE */
+
+		chan_idx++;
+		chan_idx %= cmd->nb_chan;
+	}
+}
+
+static int ni_m_series_ao_config_chan_descs(struct a4l_subdevice *subd,
+					    unsigned int chanspec[],
+					    unsigned int n_chans, int timed)
+{
+	unsigned int range;
+	unsigned int chan;
+	unsigned int conf;
+	int i, invert = 0;
+	struct a4l_device *dev = subd->dev;
+
+	for (i = 0; i < boardtype.n_aochan; ++i) {
+		ni_writeb(0xf, M_Offset_AO_Waveform_Order(i));
+	}
+	for (i = 0; i < n_chans; i++) {
+		struct a4l_range *rng;
+		int idx;
+		chan = CR_CHAN(chanspec[i]);
+		range = CR_RNG(chanspec[i]);
+
+		/* TODO: this is a huge hack!
+		   Something is missing in the kernel API. We must
+		   allow access to the proper range descriptor */
+		idx =  (subd->rng_desc->mode !=
+			A4L_RNG_GLOBAL_RNGDESC) ? chan : 0;
+		rng = &(subd->rng_desc->rngtabs[idx]->rngs[range]);
+
+		invert = 0;
+		conf = 0;
+		switch (rng->max - rng->min) {
+		case 20000000:
+			conf |= MSeries_AO_DAC_Reference_10V_Internal_Bits;
+			ni_writeb(0, M_Offset_AO_Reference_Attenuation(chan));
+			break;
+		case 10000000:
+			conf |= MSeries_AO_DAC_Reference_5V_Internal_Bits;
+			ni_writeb(0, M_Offset_AO_Reference_Attenuation(chan));
+			break;
+		case 4000000:
+			conf |= MSeries_AO_DAC_Reference_10V_Internal_Bits;
+			ni_writeb(MSeries_Attenuate_x5_Bit,
+				  M_Offset_AO_Reference_Attenuation(chan));
+			break;
+		case 2000000:
+			conf |= MSeries_AO_DAC_Reference_5V_Internal_Bits;
+			ni_writeb(MSeries_Attenuate_x5_Bit,
+				  M_Offset_AO_Reference_Attenuation(chan));
+			break;
+		default:
+			a4l_err(subd->dev,
+				"%s: bug! unhandled ao reference voltage\n",
+				__FUNCTION__);
+			break;
+		}
+		switch (rng->max + rng->min) {
+		case 0:
+			conf |= MSeries_AO_DAC_Offset_0V_Bits;
+			break;
+		case 10000000:
+			conf |= MSeries_AO_DAC_Offset_5V_Bits;
+			break;
+		default:
+			a4l_err(subd->dev,
+				"%s: bug! unhandled ao offset voltage\n",
+				__FUNCTION__);
+			break;
+		}
+		if (timed)
+			conf |= MSeries_AO_Update_Timed_Bit;
+		ni_writeb(conf, M_Offset_AO_Config_Bank(chan));
+		devpriv->ao_conf[chan] = conf;
+		ni_writeb(i, M_Offset_AO_Waveform_Order(chan));
+	}
+	return invert;
+}
+
+static int ni_old_ao_config_chan_descs(struct a4l_subdevice *subd,
+				       unsigned int chanspec[],
+				       unsigned int n_chans)
+{
+	struct a4l_device *dev = subd->dev;
+	unsigned int range;
+	unsigned int chan;
+	unsigned int conf;
+	int i, invert = 0;
+
+	for (i = 0; i < n_chans; i++) {
+		chan = CR_CHAN(chanspec[i]);
+		range = CR_RNG(chanspec[i]);
+		conf = AO_Channel(chan);
+
+		if (boardtype.ao_unipolar) {
+			if ((range & 1) == 0) {
+				conf |= AO_Bipolar;
+				invert = (1 << (boardtype.aobits - 1));
+			} else {
+				invert = 0;
+			}
+			if (range & 2)
+				conf |= AO_Ext_Ref;
+		} else {
+			conf |= AO_Bipolar;
+			invert = (1 << (boardtype.aobits - 1));
+		}
+
+		/* not all boards can deglitch, but this shouldn't hurt */
+		if (chanspec[i] & CR_DEGLITCH)
+			conf |= AO_Deglitch;
+
+		/* analog reference */
+		/* AREF_OTHER connects AO ground to AI ground, I think */
+		conf |= (CR_AREF(chanspec[i]) ==
+			 AREF_OTHER) ? AO_Ground_Ref : 0;
+
+		ni_writew(conf, AO_Configuration);
+		devpriv->ao_conf[chan] = conf;
+	}
+	return invert;
+}
+
+static int ni_ao_config_chan_descs(struct a4l_subdevice *subd,
+				   unsigned int chanspec[],
+				   unsigned int n_chans, int timed)
+{
+	struct a4l_device *dev = subd->dev;
+
+	if (boardtype.reg_type & ni_reg_m_series_mask)
+		return ni_m_series_ao_config_chan_descs(subd,
+							chanspec,
+							n_chans, timed);
+	else
+		return ni_old_ao_config_chan_descs(subd, chanspec, n_chans);
+}
+
+int ni_ao_insn_read(struct a4l_subdevice *subd, struct a4l_kernel_instruction *insn)
+{
+	struct a4l_device *dev = subd->dev;
+	uint16_t *data = (uint16_t *)insn->data;
+
+	data[0] = devpriv->ao[CR_CHAN(insn->chan_desc)];
+
+	return 0;
+}
+
+int ni_ao_insn_write(struct a4l_subdevice *subd, struct a4l_kernel_instruction *insn)
+{
+	struct a4l_device *dev = subd->dev;
+	unsigned int chan = CR_CHAN(insn->chan_desc);
+	uint16_t *data = (uint16_t *)insn->data;
+	unsigned int invert;
+
+	invert = ni_ao_config_chan_descs(subd,
+					 &insn->chan_desc, 1, 0);
+
+	devpriv->ao[chan] = data[0];
+
+	if (boardtype.reg_type & ni_reg_m_series_mask) {
+		ni_writew(data[0], M_Offset_DAC_Direct_Data(chan));
+	} else
+		ni_writew(data[0] ^ invert,
+			  (chan) ? DAC1_Direct_Data : DAC0_Direct_Data);
+
+	return 0;
+}
+
+int ni_ao_insn_write_671x(struct a4l_subdevice *subd, struct a4l_kernel_instruction *insn)
+{
+	struct a4l_device *dev = subd->dev;
+	unsigned int chan = CR_CHAN(insn->chan_desc);
+	uint16_t *data = (uint16_t *)insn->data;
+	unsigned int invert;
+
+	ao_win_out(1 << chan, AO_Immediate_671x);
+	invert = 1 << (boardtype.aobits - 1);
+
+	ni_ao_config_chan_descs(subd, &insn->chan_desc, 1, 0);
+
+	devpriv->ao[chan] = data[0];
+	ao_win_out(data[0] ^ invert, DACx_Direct_Data_671x(chan));
+
+	return 0;
+}
+
+int ni_ao_inttrig(struct a4l_subdevice *subd, lsampl_t trignum)
+{
+	struct a4l_device *dev = subd->dev;
+	int ret, interrupt_b_bits, i;
+	static const int timeout = 1000;
+
+	if (trignum != 0)
+		return -EINVAL;
+
+	/* TODO: disable the trigger until a command is recorded.
+	   A null trig at the beginning prevents the ao start trigger from
+	   executing more than once per command (and doing things like trying
+	   to allocate the ao dma channel multiple times) */
+
+	ni_set_bits(dev, Interrupt_B_Enable_Register,
+		    AO_FIFO_Interrupt_Enable | AO_Error_Interrupt_Enable, 0);
+	interrupt_b_bits = AO_Error_Interrupt_Enable;
+
+#if (defined(CONFIG_XENO_DRIVERS_ANALOGY_NI_MITE) || \
+     defined(CONFIG_XENO_DRIVERS_ANALOGY_NI_MITE_MODULE))
+	devpriv->stc_writew(dev, 1, DAC_FIFO_Clear);
+	if (boardtype.reg_type & ni_reg_6xxx_mask)
+		ni_ao_win_outl(dev, 0x6, AO_FIFO_Offset_Load_611x);
+	ret = ni_ao_setup_MITE_dma(subd);
+	if (ret)
+		return ret;
+	ret = ni_ao_wait_for_dma_load(subd);
+	if (ret < 0)
+		return ret;
+#else /* !CONFIG_XENO_DRIVERS_ANALOGY_NI_MITE */
+	ret = ni_ao_prep_fifo(subd);
+	if (ret == 0)
+		return -EPIPE;
+
+	interrupt_b_bits |= AO_FIFO_Interrupt_Enable;
+#endif /* CONFIG_XENO_DRIVERS_ANALOGY_NI_MITE */
+
+	devpriv->stc_writew(dev, devpriv->ao_mode3 | AO_Not_An_UPDATE,
+			    AO_Mode_3_Register);
+	devpriv->stc_writew(dev, devpriv->ao_mode3, AO_Mode_3_Register);
+	/* wait for DACs to be loaded */
+	for (i = 0; i < timeout; i++) {
+		a4l_udelay(1);
+		if ((devpriv->stc_readw(dev, Joint_Status_2_Register) &
+		     AO_TMRDACWRs_In_Progress_St) == 0)
+			break;
+	}
+	if (i == timeout) {
+		a4l_err(dev,
+			"ni_ao_inttrig: timed out "
+			"waiting for AO_TMRDACWRs_In_Progress_St to clear");
+		return -EIO;
+	}
+	/* the STC manual says we need to clear the error interrupt after
+	   AO_TMRDACWRs_In_Progress_St clears */
+	devpriv->stc_writew(dev, AO_Error_Interrupt_Ack,
+			    Interrupt_B_Ack_Register);
+
+	ni_set_bits(dev, Interrupt_B_Enable_Register, interrupt_b_bits, 1);
+
+	devpriv->stc_writew(dev,
+			    devpriv->ao_cmd1 |
+			    AO_UI_Arm | AO_UC_Arm |
+			    AO_BC_Arm | AO_DAC1_Update_Mode |
+			    AO_DAC0_Update_Mode,
+			    AO_Command_1_Register);
+
+	devpriv->stc_writew(dev,
+			    devpriv->ao_cmd2 | AO_START1_Pulse,
+			    AO_Command_2_Register);
+
+	return 0;
+}
+
+int ni_ao_cmd(struct a4l_subdevice *subd, struct a4l_cmd_desc *cmd)
+{
+	struct a4l_device *dev = subd->dev;
+
+	int bits;
+	int i;
+	unsigned trigvar;
+
+	if (a4l_get_irq(dev) == A4L_IRQ_UNUSED) {
+		a4l_err(dev, "ni_ao_cmd: cannot run command without an irq");
+		return -EIO;
+	}
+
+	devpriv->stc_writew(dev, AO_Configuration_Start, Joint_Reset_Register);
+
+	devpriv->stc_writew(dev, AO_Disarm, AO_Command_1_Register);
+
+	if (boardtype.reg_type & ni_reg_6xxx_mask) {
+		ao_win_out(CLEAR_WG, AO_Misc_611x);
+
+		bits = 0;
+		for (i = 0; i < cmd->nb_chan; i++) {
+			int chan;
+
+			chan = CR_CHAN(cmd->chan_descs[i]);
+			bits |= 1 << chan;
+			ao_win_out(chan, AO_Waveform_Generation_611x);
+		}
+		ao_win_out(bits, AO_Timed_611x);
+	}
+
+	ni_ao_config_chan_descs(subd, cmd->chan_descs, cmd->nb_chan, 1);
+
+	if (cmd->stop_src == TRIG_NONE) {
+		devpriv->ao_mode1 |= AO_Continuous;
+		devpriv->ao_mode1 &= ~AO_Trigger_Once;
+	} else {
+		devpriv->ao_mode1 &= ~AO_Continuous;
+		devpriv->ao_mode1 |= AO_Trigger_Once;
+	}
+	devpriv->stc_writew(dev, devpriv->ao_mode1, AO_Mode_1_Register);
+	devpriv->ao_trigger_select &=
+		~(AO_START1_Polarity | AO_START1_Select(-1));
+	devpriv->ao_trigger_select |= AO_START1_Edge | AO_START1_Sync;
+	devpriv->stc_writew(dev, devpriv->ao_trigger_select,
+			    AO_Trigger_Select_Register);
+	devpriv->ao_mode3 &= ~AO_Trigger_Length;
+	devpriv->stc_writew(dev, devpriv->ao_mode3, AO_Mode_3_Register);
+
+	devpriv->stc_writew(dev, devpriv->ao_mode1, AO_Mode_1_Register);
+	devpriv->ao_mode2 &= ~AO_BC_Initial_Load_Source;
+	devpriv->stc_writew(dev, devpriv->ao_mode2, AO_Mode_2_Register);
+	if (cmd->stop_src == TRIG_NONE) {
+		devpriv->stc_writel(dev, 0xffffff, AO_BC_Load_A_Register);
+	} else {
+		devpriv->stc_writel(dev, 0, AO_BC_Load_A_Register);
+	}
+	devpriv->stc_writew(dev, AO_BC_Load, AO_Command_1_Register);
+	devpriv->ao_mode2 &= ~AO_UC_Initial_Load_Source;
+	devpriv->stc_writew(dev, devpriv->ao_mode2, AO_Mode_2_Register);
+	switch (cmd->stop_src) {
+	case TRIG_COUNT:
+		devpriv->stc_writel(dev, cmd->stop_arg, AO_UC_Load_A_Register);
+		devpriv->stc_writew(dev, AO_UC_Load, AO_Command_1_Register);
+		devpriv->stc_writel(dev, cmd->stop_arg - 1,
+				    AO_UC_Load_A_Register);
+		break;
+	case TRIG_NONE:
+		devpriv->stc_writel(dev, 0xffffff, AO_UC_Load_A_Register);
+		devpriv->stc_writew(dev, AO_UC_Load, AO_Command_1_Register);
+		devpriv->stc_writel(dev, 0xffffff, AO_UC_Load_A_Register);
+		break;
+	default:
+		devpriv->stc_writel(dev, 0, AO_UC_Load_A_Register);
+		devpriv->stc_writew(dev, AO_UC_Load, AO_Command_1_Register);
+		devpriv->stc_writel(dev, cmd->stop_arg, AO_UC_Load_A_Register);
+	}
+
+	devpriv->ao_mode1 &=
+		~(AO_UI_Source_Select(0x1f) | AO_UI_Source_Polarity |
+		  AO_UPDATE_Source_Select(0x1f) | AO_UPDATE_Source_Polarity);
+	switch (cmd->scan_begin_src) {
+	case TRIG_TIMER:
+		devpriv->ao_cmd2 &= ~AO_BC_Gate_Enable;
+		trigvar =
+			ni_ns_to_timer(dev, cmd->scan_begin_arg,
+				       TRIG_ROUND_NEAREST);
+		devpriv->stc_writel(dev, 1, AO_UI_Load_A_Register);
+		devpriv->stc_writew(dev, AO_UI_Load, AO_Command_1_Register);
+		devpriv->stc_writel(dev, trigvar, AO_UI_Load_A_Register);
+		break;
+	case TRIG_EXT:
+		devpriv->ao_mode1 |=
+			AO_UPDATE_Source_Select(cmd->scan_begin_arg);
+		if (cmd->scan_begin_arg & CR_INVERT)
+			devpriv->ao_mode1 |= AO_UPDATE_Source_Polarity;
+		devpriv->ao_cmd2 |= AO_BC_Gate_Enable;
+		break;
+	default:
+		BUG();
+		break;
+	}
+	devpriv->stc_writew(dev, devpriv->ao_cmd2, AO_Command_2_Register);
+	devpriv->stc_writew(dev, devpriv->ao_mode1, AO_Mode_1_Register);
+	devpriv->ao_mode2 &=
+		~(AO_UI_Reload_Mode(3) | AO_UI_Initial_Load_Source);
+	devpriv->stc_writew(dev, devpriv->ao_mode2, AO_Mode_2_Register);
+
+	if ((boardtype.reg_type & ni_reg_6xxx_mask) == 0) {
+		if (cmd->scan_end_arg > 1) {
+			devpriv->ao_mode1 |= AO_Multiple_Channels;
+			devpriv->stc_writew(dev,
+					    AO_Number_Of_Channels(cmd->scan_end_arg - 1) |
+					    AO_UPDATE_Output_Select
+					    (AO_Update_Output_High_Z),
+					    AO_Output_Control_Register);
+		} else {
+			unsigned int bits;
+			devpriv->ao_mode1 &= ~AO_Multiple_Channels;
+			bits = AO_UPDATE_Output_Select(AO_Update_Output_High_Z);
+			if (boardtype.reg_type & ni_reg_m_series_mask) {
+				bits |= AO_Number_Of_Channels(0);
+			} else {
+				bits |= AO_Number_Of_Channels(CR_CHAN(cmd->
+								      chan_descs[0]));
+			}
+			devpriv->stc_writew(dev, bits,
+					    AO_Output_Control_Register);
+		}
+		devpriv->stc_writew(dev, devpriv->ao_mode1, AO_Mode_1_Register);
+	}
+
+	devpriv->stc_writew(dev, AO_DAC0_Update_Mode | AO_DAC1_Update_Mode,
+			    AO_Command_1_Register);
+
+	devpriv->ao_mode3 |= AO_Stop_On_Overrun_Error;
+	devpriv->stc_writew(dev, devpriv->ao_mode3, AO_Mode_3_Register);
+
+	devpriv->ao_mode2 &= ~AO_FIFO_Mode_Mask;
+
+#if (defined(CONFIG_XENO_DRIVERS_ANALOGY_NI_MITE) || \
+     defined(CONFIG_XENO_DRIVERS_ANALOGY_NI_MITE_MODULE))
+	devpriv->ao_mode2 |= AO_FIFO_Mode_HF_to_F;
+#else /* !CONFIG_XENO_DRIVERS_ANALOGY_NI_MITE */
+	devpriv->ao_mode2 |= AO_FIFO_Mode_HF;
+#endif /* CONFIG_XENO_DRIVERS_ANALOGY_NI_MITE */
+	devpriv->ao_mode2 &= ~AO_FIFO_Retransmit_Enable;
+	devpriv->stc_writew(dev, devpriv->ao_mode2, AO_Mode_2_Register);
+
+	bits = AO_BC_Source_Select | AO_UPDATE_Pulse_Width |
+		AO_TMRDACWR_Pulse_Width;
+	if (boardtype.ao_fifo_depth)
+		bits |= AO_FIFO_Enable;
+	else
+		bits |= AO_DMA_PIO_Control;
+#if 0
+	/* F Hess: windows driver does not set AO_Number_Of_DAC_Packages bit for 6281,
+	   verified with bus analyzer. */
+	if (boardtype.reg_type & ni_reg_m_series_mask)
+		bits |= AO_Number_Of_DAC_Packages;
+#endif
+	devpriv->stc_writew(dev, bits, AO_Personal_Register);
+	/* enable sending of ao dma requests */
+	devpriv->stc_writew(dev, AO_AOFREQ_Enable, AO_Start_Select_Register);
+
+	devpriv->stc_writew(dev, AO_Configuration_End, Joint_Reset_Register);
+
+	if (cmd->stop_src == TRIG_COUNT) {
+		devpriv->stc_writew(dev, AO_BC_TC_Interrupt_Ack,
+				    Interrupt_B_Ack_Register);
+		ni_set_bits(dev, Interrupt_B_Enable_Register,
+			    AO_BC_TC_Interrupt_Enable, 1);
+	}
+
+	return 0;
+}
+
+struct a4l_cmd_desc mio_ao_cmd_mask = {
+	.idx_subd = 0,
+	.start_src = TRIG_INT,
+	.scan_begin_src = TRIG_TIMER | TRIG_EXT,
+	.convert_src = TRIG_NOW,
+	.scan_end_src = TRIG_COUNT,
+	.stop_src = TRIG_COUNT | TRIG_NONE,
+};
+
+int ni_ao_cmdtest(struct a4l_subdevice *subd, struct a4l_cmd_desc *cmd)
+{
+	struct a4l_device *dev = subd->dev;
+
+	/* Make sure trigger sources are unique and mutually compatible */
+
+	if (cmd->stop_src != TRIG_COUNT && cmd->stop_src != TRIG_NONE)
+		return -EINVAL;
+
+	/* Make sure arguments are trivially compatible */
+
+	if (cmd->start_arg != 0) {
+		cmd->start_arg = 0;
+		return -EINVAL;
+	}
+
+	if (cmd->scan_begin_src == TRIG_TIMER) {
+		if (cmd->scan_begin_arg < boardtype.ao_speed) {
+			cmd->scan_begin_arg = boardtype.ao_speed;
+			return -EINVAL;
+		}
+		if (cmd->scan_begin_arg > devpriv->clock_ns * 0xffffff) {
+			/* XXX check */
+			cmd->scan_begin_arg = devpriv->clock_ns * 0xffffff;
+			return -EINVAL;
+		}
+	}
+
+	if (cmd->convert_arg != 0) {
+		cmd->convert_arg = 0;
+		return -EINVAL;
+	}
+	if (cmd->scan_end_arg != cmd->nb_chan) {
+		cmd->scan_end_arg = cmd->nb_chan;
+		return -EINVAL;
+	}
+	if (cmd->stop_src == TRIG_COUNT) {
+		/* XXX check */
+		if (cmd->stop_arg > 0x00ffffff) {
+			cmd->stop_arg = 0x00ffffff;
+			return -EINVAL;
+		}
+	} else {
+		/* TRIG_NONE */
+		if (cmd->stop_arg != 0) {
+			cmd->stop_arg = 0;
+			return -EINVAL;
+		}
+	}
+
+	/* step 4: fix up any arguments */
+	if (cmd->scan_begin_src == TRIG_TIMER) {
+
+		if (cmd->scan_begin_arg !=
+		   ni_timer_to_ns(dev,
+				  ni_ns_to_timer(dev,
+						 cmd->scan_begin_arg,
+						 cmd->flags & TRIG_ROUND_MASK)))
+			return -EINVAL;
+	}
+
+	return 0;
+}
+
+void ni_ao_reset(struct a4l_subdevice *subd)
+{
+	struct a4l_device *dev = subd->dev;
+
+	ni_release_ao_mite_channel(dev);
+
+	devpriv->stc_writew(dev, AO_Configuration_Start, Joint_Reset_Register);
+	devpriv->stc_writew(dev, AO_Disarm, AO_Command_1_Register);
+	ni_set_bits(dev, Interrupt_B_Enable_Register, ~0, 0);
+	devpriv->stc_writew(dev, AO_BC_Source_Select, AO_Personal_Register);
+	devpriv->stc_writew(dev, 0x3f98, Interrupt_B_Ack_Register);
+	devpriv->stc_writew(dev, AO_BC_Source_Select | AO_UPDATE_Pulse_Width |
+			    AO_TMRDACWR_Pulse_Width, AO_Personal_Register);
+	devpriv->stc_writew(dev, 0, AO_Output_Control_Register);
+	devpriv->stc_writew(dev, 0, AO_Start_Select_Register);
+	devpriv->ao_cmd1 = 0;
+	devpriv->stc_writew(dev, devpriv->ao_cmd1, AO_Command_1_Register);
+	devpriv->ao_cmd2 = 0;
+	devpriv->stc_writew(dev, devpriv->ao_cmd2, AO_Command_2_Register);
+	devpriv->ao_mode1 = 0;
+	devpriv->stc_writew(dev, devpriv->ao_mode1, AO_Mode_1_Register);
+	devpriv->ao_mode2 = 0;
+	devpriv->stc_writew(dev, devpriv->ao_mode2, AO_Mode_2_Register);
+	if (boardtype.reg_type & ni_reg_m_series_mask)
+		devpriv->ao_mode3 = AO_Last_Gate_Disable;
+	else
+		devpriv->ao_mode3 = 0;
+	devpriv->stc_writew(dev, devpriv->ao_mode3, AO_Mode_3_Register);
+	devpriv->ao_trigger_select = 0;
+	devpriv->stc_writew(dev, devpriv->ao_trigger_select,
+			    AO_Trigger_Select_Register);
+	if (boardtype.reg_type & ni_reg_6xxx_mask) {
+		ao_win_out(0x3, AO_Immediate_671x);
+		ao_win_out(CLEAR_WG, AO_Misc_611x);
+	}
+	devpriv->stc_writew(dev, AO_Configuration_End, Joint_Reset_Register);
+}
+
+/* digital io */
+
+int ni_dio_insn_config(struct a4l_subdevice *subd, struct a4l_kernel_instruction *insn)
+{
+	struct a4l_device *dev = subd->dev;
+	unsigned int *data = (unsigned int *)insn->data;
+
+#ifdef CONFIG_DEBUG_DIO
+	a4l_info(dev, "chan=%d io=%d\n", CR_CHAN(insn->chan_desc), data[0]);
+#endif /* CONFIG_DEBUG_DIO */
+
+	switch (data[0]) {
+	case A4L_INSN_CONFIG_DIO_OUTPUT:
+		devpriv->io_bits |= 1 << CR_CHAN(insn->chan_desc);
+		break;
+	case A4L_INSN_CONFIG_DIO_INPUT:
+		devpriv->io_bits &= ~(1 << CR_CHAN(insn->chan_desc));
+		break;
+	case A4L_INSN_CONFIG_DIO_QUERY:
+		data[1] = (devpriv->io_bits &
+			   (1 << CR_CHAN(insn->chan_desc))) ?
+			A4L_OUTPUT : A4L_INPUT;
+		return 0;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	devpriv->dio_control &= ~DIO_Pins_Dir_Mask;
+	devpriv->dio_control |= DIO_Pins_Dir(devpriv->io_bits);
+	devpriv->stc_writew(dev, devpriv->dio_control, DIO_Control_Register);
+
+	return 1;
+}
+
+int ni_dio_insn_bits(struct a4l_subdevice *subd, struct a4l_kernel_instruction *insn)
+{
+	struct a4l_device *dev = subd->dev;
+	uint8_t *data = (uint8_t *)insn->data;
+
+#ifdef CONFIG_DEBUG_DIO
+	a4l_info(dev, "mask=0x%x bits=0x%x\n", data[0], data[1]);
+#endif
+
+	if (insn->data_size != 2 * sizeof(uint8_t))
+		return -EINVAL;
+
+	if (data[0]) {
+		/* Perform check to make sure we're not using the
+		   serial part of the dio */
+		if ((data[0] & (DIO_SDIN | DIO_SDOUT))
+		    && devpriv->serial_interval_ns)
+			return -EBUSY;
+
+		devpriv->dio_state &= ~data[0];
+		devpriv->dio_state |= (data[0] & data[1]);
+		devpriv->dio_output &= ~DIO_Parallel_Data_Mask;
+		devpriv->dio_output |=
+			DIO_Parallel_Data_Out(devpriv->dio_state);
+		devpriv->stc_writew(dev, devpriv->dio_output,
+				    DIO_Output_Register);
+	}
+
+	data[1] = (uint8_t)
+		devpriv->stc_readw(dev, DIO_Parallel_Input_Register);
+
+	return 0;
+}
+
+int ni_m_series_dio_insn_config(struct a4l_subdevice *subd, struct a4l_kernel_instruction *insn)
+{
+	struct a4l_device *dev = subd->dev;
+	unsigned int *data = (unsigned int *)insn->data;
+
+#ifdef CONFIG_DEBUG_DIO
+	a4l_info(dev, "chan=%d io=%d\n", CR_CHAN(insn->chan_desc), data[0]);
+#endif
+	switch (data[0]) {
+	case A4L_INSN_CONFIG_DIO_OUTPUT:
+		devpriv->io_bits |= 1 << CR_CHAN(insn->chan_desc);
+		break;
+	case A4L_INSN_CONFIG_DIO_INPUT:
+		devpriv->io_bits &= ~(1 << CR_CHAN(insn->chan_desc));
+		break;
+	case A4L_INSN_CONFIG_DIO_QUERY:
+		data[1] = (devpriv->io_bits &
+			   (1 << CR_CHAN(insn->chan_desc))) ?
+			A4L_OUTPUT : A4L_INPUT;
+		return 0;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	ni_writel(devpriv->io_bits, M_Offset_DIO_Direction);
+
+	return 0;
+}
+
+int ni_m_series_dio_insn_bits_8(struct a4l_subdevice *subd, struct a4l_kernel_instruction *insn)
+{
+	struct a4l_device *dev = subd->dev;
+	uint8_t *data = (uint8_t *)insn->data;
+
+#ifdef CONFIG_DEBUG_DIO
+	a4l_info(dev, "mask=0x%x bits=0x%x\n", data[0], data[1]);
+#endif
+
+	if (insn->data_size != 2 * sizeof(uint8_t))
+		return -EINVAL;
+
+	if (data[0]) {
+		devpriv->dio_state &= ~data[0];
+		devpriv->dio_state |= (data[0] & data[1]);
+		ni_writel(devpriv->dio_state, M_Offset_Static_Digital_Output);
+	}
+
+	data[1] = (uint8_t) ni_readl(M_Offset_Static_Digital_Input);
+
+	return 0;
+}
+
+int ni_m_series_dio_insn_bits_32(struct a4l_subdevice *subd, struct a4l_kernel_instruction *insn)
+{
+	struct a4l_device *dev = subd->dev;
+	uint32_t *data = (uint32_t *)insn->data;
+
+#ifdef CONFIG_DEBUG_DIO
+	a4l_info(dev, "mask=0x%x bits=0x%x\n", data[0], data[1]);
+#endif
+
+	if (insn->data_size != 2 * sizeof(uint32_t))
+		return -EINVAL;
+
+	if (data[0]) {
+		devpriv->dio_state &= ~data[0];
+		devpriv->dio_state |= (data[0] & data[1]);
+		ni_writel(devpriv->dio_state, M_Offset_Static_Digital_Output);
+	}
+
+	data[1] = ni_readl(M_Offset_Static_Digital_Input);
+
+	return 0;
+}
+
+#if (defined(CONFIG_XENO_DRIVERS_ANALOGY_NI_MITE) || \
+     defined(CONFIG_XENO_DRIVERS_ANALOGY_NI_MITE_MODULE))
+
+struct a4l_cmd_desc mio_dio_cmd_mask = {
+	.idx_subd = 0,
+	.start_src = TRIG_INT,
+	.scan_begin_src = TRIG_EXT,
+	.convert_src = TRIG_NOW,
+	.scan_end_src = TRIG_COUNT,
+	.stop_src = TRIG_NONE,
+};
+
+int ni_cdio_cmdtest(struct a4l_subdevice *subd, struct a4l_cmd_desc *cmd)
+{
+	unsigned int i;
+
+	/* Make sure arguments are trivially compatible */
+
+	if (cmd->start_arg != 0) {
+		cmd->start_arg = 0;
+		return -EINVAL;
+	}
+
+	if ((cmd->scan_begin_arg &
+	     PACK_FLAGS(CDO_Sample_Source_Select_Mask, 0, 0, CR_INVERT)) !=
+	    cmd->scan_begin_arg)
+		return -EINVAL;
+
+	if (cmd->convert_arg != 0) {
+		cmd->convert_arg = 0;
+		return -EINVAL;
+	}
+
+	if (cmd->scan_end_arg != cmd->nb_chan) {
+		cmd->scan_end_arg = cmd->nb_chan;
+		return -EINVAL;
+	}
+
+	if (cmd->stop_arg != 0) {
+		cmd->stop_arg = 0;
+		return -EINVAL;
+	}
+
+	/* Check chan_descs */
+
+	for (i = 0; i < cmd->nb_chan; ++i) {
+		if (cmd->chan_descs[i] != i)
+			return -EINVAL;
+	}
+
+	return 0;
+}
+
+int ni_cdio_cmd(struct a4l_subdevice *subd, struct a4l_cmd_desc *cmd)
+{
+	struct a4l_device *dev = subd->dev;
+	unsigned cdo_mode_bits = CDO_FIFO_Mode_Bit | CDO_Halt_On_Error_Bit;
+
+	ni_writel(CDO_Reset_Bit, M_Offset_CDIO_Command);
+	switch (cmd->scan_begin_src) {
+	case TRIG_EXT:
+		cdo_mode_bits |=
+			CR_CHAN(cmd->scan_begin_arg) &
+			CDO_Sample_Source_Select_Mask;
+		break;
+	default:
+		BUG();
+		break;
+	}
+	if (cmd->scan_begin_arg & CR_INVERT)
+		cdo_mode_bits |= CDO_Polarity_Bit;
+	ni_writel(cdo_mode_bits, M_Offset_CDO_Mode);
+
+	if (devpriv->io_bits) {
+		ni_writel(devpriv->dio_state, M_Offset_CDO_FIFO_Data);
+		ni_writel(CDO_SW_Update_Bit, M_Offset_CDIO_Command);
+		ni_writel(devpriv->io_bits, M_Offset_CDO_Mask_Enable);
+	} else {
+		a4l_err(dev,
+			"ni_cdio_cmd: attempted to run digital "
+			"output command with no lines configured as outputs");
+		return -EIO;
+	}
+
+	return 0;
+}
+
+void ni_cdio_cancel(struct a4l_subdevice *subd)
+{
+	struct a4l_device *dev = subd->dev;
+	ni_writel(CDO_Disarm_Bit | CDO_Error_Interrupt_Enable_Clear_Bit |
+		  CDO_Empty_FIFO_Interrupt_Enable_Clear_Bit |
+		  CDO_FIFO_Request_Interrupt_Enable_Clear_Bit,
+		  M_Offset_CDIO_Command);
+
+	ni_writel(0, M_Offset_CDO_Mask_Enable);
+	ni_release_cdo_mite_channel(dev);
+}
+
+int ni_cdo_inttrig(struct a4l_subdevice *subd, lsampl_t trignum)
+{
+	struct a4l_device *dev = subd->dev;
+	int err;
+	unsigned i;
+	const unsigned timeout = 1000;
+
+	/* TODO: disable the trigger until a command is recorded.
+	   A null trig at the beginning prevents the start trigger from
+	   executing more than once per command (and doing things like trying
+	   to allocate the dma channel multiple times) */
+
+	err = ni_cdo_setup_MITE_dma(subd);
+	if (err < 0)
+		return err;
+
+	/* wait for dma to fill output fifo */
+	for (i = 0; i < timeout; ++i) {
+		if (ni_readl(M_Offset_CDIO_Status) & CDO_FIFO_Full_Bit)
+			break;
+		a4l_udelay(10);
+	}
+
+	if (i == timeout) {
+		a4l_err(dev, "ni_cdo_inttrig: dma failed to fill cdo fifo!");
+		ni_cdio_cancel(subd);
+		return -EIO;
+	}
+
+	ni_writel(CDO_Arm_Bit |
+		  CDO_Error_Interrupt_Enable_Set_Bit |
+		  CDO_Empty_FIFO_Interrupt_Enable_Set_Bit,
+		  M_Offset_CDIO_Command);
+
+	return 0;
+}
+
+#endif /* CONFIG_XENO_DRIVERS_ANALOGY_NI_MITE */
+
+static void handle_cdio_interrupt(struct a4l_device *dev)
+{
+#if (defined(CONFIG_XENO_DRIVERS_ANALOGY_NI_MITE) || \
+     defined(CONFIG_XENO_DRIVERS_ANALOGY_NI_MITE_MODULE))
+	unsigned cdio_status;
+	unsigned long flags;
+	struct a4l_subdevice *subd = a4l_get_subd(dev, NI_DIO_SUBDEV);
+
+	if ((boardtype.reg_type & ni_reg_m_series_mask) == 0) {
+		return;
+	}
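+	/* acknowledge a completed MITE link and sync the CDO DMA channel
+	   with the device buffer */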
+	rtdm_lock_get_irqsave(&devpriv->mite_channel_lock, flags);
+	if (devpriv->cdo_mite_chan) {
+		unsigned cdo_mite_status =
+			a4l_mite_get_status(devpriv->cdo_mite_chan);
+		if (cdo_mite_status & CHSR_LINKC) {
+			writel(CHOR_CLRLC,
+			       devpriv->mite->mite_io_addr +
+			       MITE_CHOR(devpriv->cdo_mite_chan->channel));
+		}
+		a4l_mite_sync_output_dma(devpriv->cdo_mite_chan, subd);
+	}
+	rtdm_lock_put_irqrestore(&devpriv->mite_channel_lock, flags);
+
+	cdio_status = ni_readl(M_Offset_CDIO_Status);
+	if (cdio_status & (CDO_Overrun_Bit | CDO_Underflow_Bit)) {
+		/* XXX just guessing this is needed and does something useful */
+		ni_writel(CDO_Error_Interrupt_Confirm_Bit, M_Offset_CDIO_Command);
+		a4l_buf_evt(subd, A4L_BUF_ERROR);
+	}
+	if (cdio_status & CDO_FIFO_Empty_Bit) {
+		ni_writel(CDO_Empty_FIFO_Interrupt_Enable_Clear_Bit,
+			  M_Offset_CDIO_Command);
+	}
+	a4l_buf_evt(subd, 0);
+#endif /* CONFIG_XENO_DRIVERS_ANALOGY_NI_MITE */
+}
+
+static int ni_serial_hw_readwrite8(struct a4l_device * dev,
+				   unsigned char data_out, unsigned char *data_in)
+{
+	unsigned int status1;
+	int err = 0, count = 20;
+
+#ifdef CONFIG_DEBUG_DIO
+	a4l_info(dev, "outputting 0x%x\n", data_out);
+#endif
+
+	devpriv->dio_output &= ~DIO_Serial_Data_Mask;
+	devpriv->dio_output |= DIO_Serial_Data_Out(data_out);
+	devpriv->stc_writew(dev, devpriv->dio_output, DIO_Output_Register);
+
+	status1 = devpriv->stc_readw(dev, Joint_Status_1_Register);
+	if (status1 & DIO_Serial_IO_In_Progress_St) {
+		err = -EBUSY;
+		goto Error;
+	}
+
+	devpriv->dio_control |= DIO_HW_Serial_Start;
+	devpriv->stc_writew(dev, devpriv->dio_control, DIO_Control_Register);
+	devpriv->dio_control &= ~DIO_HW_Serial_Start;
+
+	/* Wait until STC says we're done, but don't loop infinitely. */
+	while ((status1 =
+		devpriv->stc_readw(dev,
+				   Joint_Status_1_Register)) &
+	       DIO_Serial_IO_In_Progress_St) {
+		/* Delay one bit per loop */
+		a4l_udelay((devpriv->serial_interval_ns + 999) / 1000);
+		if (--count < 0) {
+			a4l_err(dev,
+				"ni_serial_hw_readwrite8: "
+				"SPI serial I/O didn't finish in time!\n");
+			err = -ETIME;
+			goto Error;
+		}
+	}
+
+	/* Delay for last bit. This delay is absolutely necessary, because
+	   DIO_Serial_IO_In_Progress_St goes high one bit too early. */
+	a4l_udelay((devpriv->serial_interval_ns + 999) / 1000);
+
+	if (data_in != NULL) {
+		*data_in = devpriv->stc_readw(dev, DIO_Serial_Input_Register);
+#ifdef CONFIG_DEBUG_DIO
+		a4l_info(dev, "inputted 0x%x\n", *data_in);
+#endif
+	}
+
+Error:
+	devpriv->stc_writew(dev, devpriv->dio_control, DIO_Control_Register);
+
+	return err;
+}
+
+static int ni_serial_sw_readwrite8(struct a4l_device * dev,
+				   unsigned char data_out, unsigned char *data_in)
+{
+	unsigned char mask, input = 0;
+
+#ifdef CONFIG_DEBUG_DIO
+	a4l_info(dev, "outputting 0x%x\n", data_out);
+#endif
+
+	/* Wait for one bit before transfer */
+	a4l_udelay((devpriv->serial_interval_ns + 999) / 1000);
+
+	for (mask = 0x80; mask; mask >>= 1) {
+		/* Output current bit; note that we cannot touch devpriv->dio_state
+		   because it is a per-subdevice field, and serial is
+		   a separate subdevice from DIO. */
+		devpriv->dio_output &= ~DIO_SDOUT;
+		if (data_out & mask) {
+			devpriv->dio_output |= DIO_SDOUT;
+		}
+		devpriv->stc_writew(dev, devpriv->dio_output,
+				    DIO_Output_Register);
+
+		/* Assert SDCLK (active low, inverted), wait for half of
+		   the delay, deassert SDCLK, and wait for the other half. */
+		devpriv->dio_control |= DIO_Software_Serial_Control;
+		devpriv->stc_writew(dev, devpriv->dio_control,
+				    DIO_Control_Register);
+
+		a4l_udelay((devpriv->serial_interval_ns + 999) / 2000);
+
+		devpriv->dio_control &= ~DIO_Software_Serial_Control;
+		devpriv->stc_writew(dev, devpriv->dio_control,
+				    DIO_Control_Register);
+
+		a4l_udelay((devpriv->serial_interval_ns + 999) / 2000);
+
+		/* Input current bit */
+		if (devpriv->stc_readw(dev,
+				       DIO_Parallel_Input_Register) & DIO_SDIN) {
+			input |= mask;
+		}
+	}
+#ifdef CONFIG_DEBUG_DIO
+	a4l_info(dev, "inputted 0x%x\n", input);
+#endif
+	if (data_in)
+		*data_in = input;
+
+	return 0;
+}
+
+int ni_serial_insn_config(struct a4l_subdevice *subd, struct a4l_kernel_instruction *insn)
+{
+	struct a4l_device *dev = subd->dev;
+	int err = 0;
+	unsigned char byte_out, byte_in = 0;
+	unsigned int *data = (unsigned int *)insn->data;
+
+	if (insn->data_size != 2 * sizeof(unsigned int))
+		return -EINVAL;
+
+	switch (data[0]) {
+	case A4L_INSN_CONFIG_SERIAL_CLOCK:
+
+#ifdef CONFIG_DEBUG_DIO
+		a4l_info(dev, "SPI serial clock Config %d\n", data[1]);
+#endif
+
+		devpriv->serial_hw_mode = 1;
+		devpriv->dio_control |= DIO_HW_Serial_Enable;
+
+		if (data[1] == SERIAL_DISABLED) {
+			devpriv->serial_hw_mode = 0;
+			devpriv->dio_control &= ~(DIO_HW_Serial_Enable |
+						  DIO_Software_Serial_Control);
+			data[1] = SERIAL_DISABLED;
+			devpriv->serial_interval_ns = data[1];
+		} else if (data[1] <= SERIAL_600NS) {
+			/* Warning: this clock speed is too fast to reliably
+			   control SCXI. */
+			devpriv->dio_control &= ~DIO_HW_Serial_Timebase;
+			devpriv->clock_and_fout |= Slow_Internal_Timebase;
+			devpriv->clock_and_fout &= ~DIO_Serial_Out_Divide_By_2;
+			data[1] = SERIAL_600NS;
+			devpriv->serial_interval_ns = data[1];
+		} else if (data[1] <= SERIAL_1_2US) {
+			devpriv->dio_control &= ~DIO_HW_Serial_Timebase;
+			devpriv->clock_and_fout |= Slow_Internal_Timebase |
+				DIO_Serial_Out_Divide_By_2;
+			data[1] = SERIAL_1_2US;
+			devpriv->serial_interval_ns = data[1];
+		} else if (data[1] <= SERIAL_10US) {
+			devpriv->dio_control |= DIO_HW_Serial_Timebase;
+			devpriv->clock_and_fout |= Slow_Internal_Timebase |
+				DIO_Serial_Out_Divide_By_2;
+			/* Note: DIO_Serial_Out_Divide_By_2 only affects
+			   600ns/1.2us. If you turn divide_by_2 off with the
+			   slow clock, you will still get 10us, except then
+			   all your delays are wrong. */
+			data[1] = SERIAL_10US;
+			devpriv->serial_interval_ns = data[1];
+		} else {
+			devpriv->dio_control &= ~(DIO_HW_Serial_Enable |
+						  DIO_Software_Serial_Control);
+	/* m-series wants the phase-locked loop to output 80 MHz, which is
+	 * divided by 4 to 20 MHz for most timing clocks */
+			devpriv->serial_interval_ns = data[1];
+		}
+
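+	/* brute-force search for the divider/multiplier pair whose output
+	 * period is closest to the 12.5 ns (80 MHz) PLL target */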
+		devpriv->stc_writew(dev, devpriv->dio_control,
+				    DIO_Control_Register);
+		devpriv->stc_writew(dev, devpriv->clock_and_fout,
+				    Clock_and_FOUT_Register);
+		return 0;
+
+		break;
+
+	case A4L_INSN_CONFIG_BIDIRECTIONAL_DATA:
+
+		if (devpriv->serial_interval_ns == 0) {
+			return -EINVAL;
+		}
+
+		byte_out = data[1] & 0xFF;
+
+		if (devpriv->serial_hw_mode) {
+			err = ni_serial_hw_readwrite8(dev, byte_out, &byte_in);
+		} else if (devpriv->serial_interval_ns > 0) {
+			err = ni_serial_sw_readwrite8(dev, byte_out, &byte_in);
+		} else {
+			a4l_err(dev,
+				"ni_serial_insn_config: serial disabled!\n");
+			return -EINVAL;
+		}
+		if (err < 0)
+			return err;
+		data[1] = byte_in & 0xFF;
+		return 0;
+
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	return -EINVAL;
+}
+
+void mio_common_detach(struct a4l_device * dev)
+{
+	if (dev->priv) {
+		if (devpriv->counter_dev) {
+			a4l_ni_gpct_device_destroy(devpriv->counter_dev);
+		}
+	}
+}
+
+static void init_ao_67xx(struct a4l_device * dev)
+{
+	struct a4l_subdevice *subd = a4l_get_subd(dev, NI_AO_SUBDEV);
+	int i;
+
+	if (subd == NULL) {
+		a4l_err(dev, "%s: unable to find AO subdevice\n", __FUNCTION__);
+		return;
+	}
+
+	for (i = 0; i < subd->chan_desc->length; i++)
+		ni_ao_win_outw(dev, AO_Channel(i) | 0x0,
+			       AO_Configuration_2_67xx);
+}
+
+static unsigned int ni_gpct_to_stc_register(enum ni_gpct_register reg)
+{
+	unsigned stc_register;
+	switch (reg) {
+	case NITIO_G0_Autoincrement_Reg:
+		stc_register = G_Autoincrement_Register(0);
+		break;
+	case NITIO_G1_Autoincrement_Reg:
+		stc_register = G_Autoincrement_Register(1);
+		break;
+	case NITIO_G0_Command_Reg:
+		stc_register = G_Command_Register(0);
+		break;
+	case NITIO_G1_Command_Reg:
+		stc_register = G_Command_Register(1);
+		break;
+	case NITIO_G0_HW_Save_Reg:
+		stc_register = G_HW_Save_Register(0);
+		break;
+	case NITIO_G1_HW_Save_Reg:
+		stc_register = G_HW_Save_Register(1);
+		break;
+	case NITIO_G0_SW_Save_Reg:
+		stc_register = G_Save_Register(0);
+		break;
+	case NITIO_G1_SW_Save_Reg:
+		stc_register = G_Save_Register(1);
+		break;
+	case NITIO_G0_Mode_Reg:
+		stc_register = G_Mode_Register(0);
+		break;
+	case NITIO_G1_Mode_Reg:
+		stc_register = G_Mode_Register(1);
+		break;
+	case NITIO_G0_LoadA_Reg:
+		stc_register = G_Load_A_Register(0);
+		break;
+	case NITIO_G1_LoadA_Reg:
+		stc_register = G_Load_A_Register(1);
+		break;
+	case NITIO_G0_LoadB_Reg:
+		stc_register = G_Load_B_Register(0);
+		break;
+	case NITIO_G1_LoadB_Reg:
+		stc_register = G_Load_B_Register(1);
+		break;
+	case NITIO_G0_Input_Select_Reg:
+		stc_register = G_Input_Select_Register(0);
+		break;
+	case NITIO_G1_Input_Select_Reg:
+		stc_register = G_Input_Select_Register(1);
+		break;
+	case NITIO_G01_Status_Reg:
+		stc_register = G_Status_Register;
+		break;
+	case NITIO_G01_Joint_Reset_Reg:
+		stc_register = Joint_Reset_Register;
+		break;
+	case NITIO_G01_Joint_Status1_Reg:
+		stc_register = Joint_Status_1_Register;
+		break;
+	case NITIO_G01_Joint_Status2_Reg:
+		stc_register = Joint_Status_2_Register;
+		break;
+	case NITIO_G0_Interrupt_Acknowledge_Reg:
+		stc_register = Interrupt_A_Ack_Register;
+		break;
+	case NITIO_G1_Interrupt_Acknowledge_Reg:
+		stc_register = Interrupt_B_Ack_Register;
+		break;
+	case NITIO_G0_Status_Reg:
+		stc_register = AI_Status_1_Register;
+		break;
+	case NITIO_G1_Status_Reg:
+		stc_register = AO_Status_1_Register;
+		break;
+	case NITIO_G0_Interrupt_Enable_Reg:
+		stc_register = Interrupt_A_Enable_Register;
+		break;
+	case NITIO_G1_Interrupt_Enable_Reg:
+		stc_register = Interrupt_B_Enable_Register;
+		break;
+	default:
+		__a4l_err("%s: unhandled register 0x%x in switch.\n",
+			  __FUNCTION__, reg);
+		BUG();
+		return 0;
+		break;
+	}
+	return stc_register;
+}
+
+static void ni_gpct_write_register(struct ni_gpct *counter,
+				   unsigned int bits, enum ni_gpct_register reg)
+{
+	struct a4l_device *dev = counter->counter_dev->dev;
+	unsigned stc_register;
+	/* bits in the joint reset register which are relevant to counters */
+	static const unsigned gpct_joint_reset_mask = G0_Reset | G1_Reset;
+	static const unsigned gpct_interrupt_a_enable_mask =
+		G0_Gate_Interrupt_Enable | G0_TC_Interrupt_Enable;
+	static const unsigned gpct_interrupt_b_enable_mask =
+		G1_Gate_Interrupt_Enable | G1_TC_Interrupt_Enable;
+
+	switch (reg) {
+		/* m-series-only registers */
+	case NITIO_G0_Counting_Mode_Reg:
+		ni_writew(bits, M_Offset_G0_Counting_Mode);
+		break;
+	case NITIO_G1_Counting_Mode_Reg:
+		ni_writew(bits, M_Offset_G1_Counting_Mode);
+		break;
+	case NITIO_G0_Second_Gate_Reg:
+		ni_writew(bits, M_Offset_G0_Second_Gate);
+		break;
+	case NITIO_G1_Second_Gate_Reg:
+		ni_writew(bits, M_Offset_G1_Second_Gate);
+		break;
+	case NITIO_G0_DMA_Config_Reg:
+		ni_writew(bits, M_Offset_G0_DMA_Config);
+		break;
+	case NITIO_G1_DMA_Config_Reg:
+		ni_writew(bits, M_Offset_G1_DMA_Config);
+		break;
+	case NITIO_G0_ABZ_Reg:
+		ni_writew(bits, M_Offset_G0_MSeries_ABZ);
+		break;
+	case NITIO_G1_ABZ_Reg:
+		ni_writew(bits, M_Offset_G1_MSeries_ABZ);
+		break;
+
+		/* 32 bit registers */
+	case NITIO_G0_LoadA_Reg:
+	case NITIO_G1_LoadA_Reg:
+	case NITIO_G0_LoadB_Reg:
+	case NITIO_G1_LoadB_Reg:
+		stc_register = ni_gpct_to_stc_register(reg);
+		devpriv->stc_writel(dev, bits, stc_register);
+		break;
+
+		/* 16 bit registers */
+	case NITIO_G0_Interrupt_Enable_Reg:
+		BUG_ON(bits & ~gpct_interrupt_a_enable_mask);
+		ni_set_bitfield(dev, Interrupt_A_Enable_Register,
+				gpct_interrupt_a_enable_mask, bits);
+		break;
+	case NITIO_G1_Interrupt_Enable_Reg:
+		BUG_ON(bits & ~gpct_interrupt_b_enable_mask);
+		ni_set_bitfield(dev, Interrupt_B_Enable_Register,
+				gpct_interrupt_b_enable_mask, bits);
+		break;
+	case NITIO_G01_Joint_Reset_Reg:
+		BUG_ON(bits & ~gpct_joint_reset_mask);
+		fallthrough;
+	default:
+		stc_register = ni_gpct_to_stc_register(reg);
+		devpriv->stc_writew(dev, bits, stc_register);
+	}
+}
+
+static unsigned int ni_gpct_read_register(struct ni_gpct *counter,
+					  enum ni_gpct_register reg)
+{
+	struct a4l_device *dev = counter->counter_dev->dev;
+	unsigned int stc_register;
+	switch (reg) {
+		/* m-series only registers */
+	case NITIO_G0_DMA_Status_Reg:
+		return ni_readw(M_Offset_G0_DMA_Status);
+		break;
+	case NITIO_G1_DMA_Status_Reg:
+		return ni_readw(M_Offset_G1_DMA_Status);
+		break;
+
+		/* 32 bit registers */
+	case NITIO_G0_HW_Save_Reg:
+	case NITIO_G1_HW_Save_Reg:
+	case NITIO_G0_SW_Save_Reg:
+	case NITIO_G1_SW_Save_Reg:
+		stc_register = ni_gpct_to_stc_register(reg);
+		return devpriv->stc_readl(dev, stc_register);
+		break;
+
+		/* 16 bit registers */
+	default:
+		stc_register = ni_gpct_to_stc_register(reg);
+		return devpriv->stc_readw(dev, stc_register);
+		break;
+	}
+	return 0;
+}
+
+int ni_freq_out_insn_read(struct a4l_subdevice *subd, struct a4l_kernel_instruction *insn)
+{
+	struct a4l_device *dev = subd->dev;
+	uint8_t *data = (uint8_t *)insn->data;
+
+	data[0] = FOUT_Divider(devpriv->clock_and_fout);
+
+	return 0;
+}
+
+int ni_freq_out_insn_write(struct a4l_subdevice *subd, struct a4l_kernel_instruction *insn)
+{
+	struct a4l_device *dev = subd->dev;
+	uint8_t *data = (uint8_t *)insn->data;
+
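+	/* disable the frequency output while the divider is changed,
+	   then re-enable it */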
+	devpriv->clock_and_fout &= ~FOUT_Enable;
+	devpriv->stc_writew(dev, devpriv->clock_and_fout,
+			    Clock_and_FOUT_Register);
+	devpriv->clock_and_fout &= ~FOUT_Divider_mask;
+	devpriv->clock_and_fout |= FOUT_Divider(data[0]);
+	devpriv->clock_and_fout |= FOUT_Enable;
+	devpriv->stc_writew(dev, devpriv->clock_and_fout,
+			    Clock_and_FOUT_Register);
+
+	return 0;
+}
+
+static int ni_set_freq_out_clock(struct a4l_device * dev, lsampl_t clock_source)
+{
+	switch (clock_source) {
+	case NI_FREQ_OUT_TIMEBASE_1_DIV_2_CLOCK_SRC:
+		devpriv->clock_and_fout &= ~FOUT_Timebase_Select;
+		break;
+	case NI_FREQ_OUT_TIMEBASE_2_CLOCK_SRC:
+		devpriv->clock_and_fout |= FOUT_Timebase_Select;
+		break;
+	default:
+		return -EINVAL;
+	}
+	devpriv->stc_writew(dev, devpriv->clock_and_fout,
+			    Clock_and_FOUT_Register);
+
+	return 0;
+}
+
+static void ni_get_freq_out_clock(struct a4l_device * dev,
+				  unsigned int * clock_source,
+				  unsigned int * clock_period_ns)
+{
+	if (devpriv->clock_and_fout & FOUT_Timebase_Select) {
+		*clock_source = NI_FREQ_OUT_TIMEBASE_2_CLOCK_SRC;
+		*clock_period_ns = TIMEBASE_2_NS;
+	} else {
+		*clock_source = NI_FREQ_OUT_TIMEBASE_1_DIV_2_CLOCK_SRC;
+		*clock_period_ns = TIMEBASE_1_NS * 2;
+	}
+}
+
+int ni_freq_out_insn_config(struct a4l_subdevice *subd, struct a4l_kernel_instruction *insn)
+{
+	struct a4l_device *dev = subd->dev;
+	unsigned int *data = (unsigned int *)insn->data;
+
+	switch (data[0]) {
+	case A4L_INSN_CONFIG_SET_CLOCK_SRC:
+		return ni_set_freq_out_clock(dev, data[1]);
+		break;
+	case A4L_INSN_CONFIG_GET_CLOCK_SRC:
+		ni_get_freq_out_clock(dev, &data[1], &data[2]);
+		return 0;
+	default:
+		break;
+	}
+
+	return -EINVAL;
+}
+
+static int ni_8255_callback(int dir, int port, int data, unsigned long arg)
+{
+	struct a4l_device *dev = (struct a4l_device *) arg;
+
+	if (dir) {
+		ni_writeb(data, Port_A + 2 * port);
+		return 0;
+	} else {
+		return ni_readb(Port_A + 2 * port);
+	}
+}
+
+/*
+  reads bytes out of eeprom
+*/
+
+static int ni_read_eeprom(struct a4l_device *dev, int addr)
+{
+	int bit;
+	int bitstring;
+
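+	/* bit-bang the 16-bit read command (opcode plus 9-bit address) MSB
+	   first, then clock the 8 data bits back in from PROMOUT */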
+	bitstring = 0x0300 | ((addr & 0x100) << 3) | (addr & 0xff);
+	ni_writeb(0x04, Serial_Command);
+	for (bit = 0x8000; bit; bit >>= 1) {
+		ni_writeb(0x04 | ((bit & bitstring) ? 0x02 : 0),
+			  Serial_Command);
+		ni_writeb(0x05 | ((bit & bitstring) ? 0x02 : 0),
+			  Serial_Command);
+	}
+	bitstring = 0;
+	for (bit = 0x80; bit; bit >>= 1) {
+		ni_writeb(0x04, Serial_Command);
+		ni_writeb(0x05, Serial_Command);
+		bitstring |= ((ni_readb(XXX_Status) & PROMOUT) ? bit : 0);
+	}
+	ni_writeb(0x00, Serial_Command);
+
+	return bitstring;
+}
+
+/*
+  presents the EEPROM as a subdevice
+*/
+
+static int ni_eeprom_insn_read(struct a4l_subdevice *subd, struct a4l_kernel_instruction *insn)
+{
+	struct a4l_device *dev = subd->dev;
+	uint8_t *data = (uint8_t *)insn->data;
+
+	data[0] = ni_read_eeprom(dev, CR_CHAN(insn->chan_desc));
+
+	return 0;
+}
+
+
+static int ni_m_series_eeprom_insn_read(struct a4l_subdevice *subd, struct a4l_kernel_instruction *insn)
+{
+	struct a4l_device *dev = subd->dev;
+	uint8_t *data = (uint8_t *)insn->data;
+
+	data[0] = devpriv->eeprom_buffer[CR_CHAN(insn->chan_desc)];
+
+	return 0;
+}
+
+static int ni_get_pwm_config(struct a4l_subdevice *subd, struct a4l_kernel_instruction *insn)
+{
+	struct a4l_device *dev = subd->dev;
+	unsigned int *data = (unsigned int*)insn->data;
+
+	data[1] = devpriv->pwm_up_count * devpriv->clock_ns;
+	data[2] = devpriv->pwm_down_count * devpriv->clock_ns;
+
+	return 0;
+}
+
+static int ni_m_series_pwm_config(struct a4l_subdevice *subd, struct a4l_kernel_instruction *insn)
+{
+	struct a4l_device *dev = subd->dev;
+	unsigned int up_count, down_count;
+	unsigned int *data = (unsigned int*)insn->data;
+
+	switch (data[0]) {
+	case A4L_INSN_CONFIG_PWM_OUTPUT:
+		switch (data[1]) {
+		case TRIG_ROUND_NEAREST:
+			up_count =
+				(data[2] +
+				 devpriv->clock_ns / 2) / devpriv->clock_ns;
+			break;
+		case TRIG_ROUND_DOWN:
+			up_count = data[2] / devpriv->clock_ns;
+			break;
+		case TRIG_ROUND_UP:
+			up_count = (data[2] + devpriv->clock_ns - 1) /
+				devpriv->clock_ns;
+			break;
+		default:
+			return -EINVAL;
+			break;
+		}
+		switch (data[3]) {
+		case TRIG_ROUND_NEAREST:
+			down_count = (data[4] + devpriv->clock_ns / 2) /
+				devpriv->clock_ns;
+			break;
+		case TRIG_ROUND_DOWN:
+			down_count = data[4] / devpriv->clock_ns;
+			break;
+		case TRIG_ROUND_UP:
+			down_count =
+				(data[4] + devpriv->clock_ns - 1) /
+				devpriv->clock_ns;
+			break;
+		default:
+			return -EINVAL;
+			break;
+		}
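+		/* if the requested high/low times are not exact multiples of
+		   the timebase, report the achievable values and let the
+		   caller retry */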
+		if (up_count * devpriv->clock_ns != data[2] ||
+		    down_count * devpriv->clock_ns != data[4]) {
+			data[2] = up_count * devpriv->clock_ns;
+			data[4] = down_count * devpriv->clock_ns;
+			return -EAGAIN;
+		}
+		ni_writel(MSeries_Cal_PWM_High_Time_Bits(up_count) |
+			  MSeries_Cal_PWM_Low_Time_Bits(down_count),
+			  M_Offset_Cal_PWM);
+		devpriv->pwm_up_count = up_count;
+		devpriv->pwm_down_count = down_count;
+		return 0;
+		break;
+	case A4L_INSN_CONFIG_GET_PWM_OUTPUT:
+		return ni_get_pwm_config(subd, insn);
+		break;
+	default:
+		return -EINVAL;
+		break;
+	}
+	return 0;
+}
+
+static int ni_6143_pwm_config(struct a4l_subdevice *subd, struct a4l_kernel_instruction *insn)
+{
+	struct a4l_device *dev = subd->dev;
+	unsigned int *data = (unsigned int*)insn->data;
+
+	unsigned up_count, down_count;
+	switch (data[0]) {
+	case A4L_INSN_CONFIG_PWM_OUTPUT:
+		switch (data[1]) {
+		case TRIG_ROUND_NEAREST:
+			up_count =
+				(data[2] + devpriv->clock_ns / 2) /
+				devpriv->clock_ns;
+			break;
+		case TRIG_ROUND_DOWN:
+			up_count = data[2] / devpriv->clock_ns;
+			break;
+		case TRIG_ROUND_UP:
+			up_count = (data[2] + devpriv->clock_ns - 1) /
+				devpriv->clock_ns;
+			break;
+		default:
+			return -EINVAL;
+			break;
+		}
+		switch (data[3]) {
+		case TRIG_ROUND_NEAREST:
+			down_count = (data[4] + devpriv->clock_ns / 2) /
+				devpriv->clock_ns;
+			break;
+		case TRIG_ROUND_DOWN:
+			down_count = data[4] / devpriv->clock_ns;
+			break;
+		case TRIG_ROUND_UP:
+			down_count = (data[4] + devpriv->clock_ns - 1) /
+				devpriv->clock_ns;
+			break;
+		default:
+			return -EINVAL;
+			break;
+		}
+		if (up_count * devpriv->clock_ns != data[2] ||
+		    down_count * devpriv->clock_ns != data[4]) {
+			data[2] = up_count * devpriv->clock_ns;
+			data[4] = down_count * devpriv->clock_ns;
+			return -EAGAIN;
+		}
+		ni_writel(up_count, Calibration_HighTime_6143);
+		devpriv->pwm_up_count = up_count;
+		ni_writel(down_count, Calibration_LowTime_6143);
+		devpriv->pwm_down_count = down_count;
+		return 0;
+		break;
+	case A4L_INSN_CONFIG_GET_PWM_OUTPUT:
+		return ni_get_pwm_config(subd, insn);
+	default:
+		return -EINVAL;
+		break;
+	}
+	return 0;
+}
+
+static int pack_mb88341(int addr, int val, int *bitstring)
+{
+	/*
+	  Fujitsu MB 88341
+	  Note that address bits are reversed.  Thanks to
+	  Ingo Keen for noticing this.
+
+	  Note also that the 88341 expects address values from
+	  1-12, whereas we use channel numbers 0-11.  The NI
+	  docs use 1-12, also, so be careful here.
+	*/
+	addr++;
+	*bitstring = ((addr & 0x1) << 11) |
+		((addr & 0x2) << 9) |
+		((addr & 0x4) << 7) | ((addr & 0x8) << 5) | (val & 0xff);
+	return 12;
+}
+
+static int pack_dac8800(int addr, int val, int *bitstring)
+{
+	*bitstring = ((addr & 0x7) << 8) | (val & 0xff);
+	return 11;
+}
+
+static int pack_dac8043(int addr, int val, int *bitstring)
+{
+	*bitstring = val & 0xfff;
+	return 12;
+}
+
+static int pack_ad8522(int addr, int val, int *bitstring)
+{
+	*bitstring = (val & 0xfff) | (addr ? 0xc000 : 0xa000);
+	return 16;
+}
+
+static int pack_ad8804(int addr, int val, int *bitstring)
+{
+	*bitstring = ((addr & 0xf) << 8) | (val & 0xff);
+	return 12;
+}
+
+static int pack_ad8842(int addr, int val, int *bitstring)
+{
+	*bitstring = ((addr + 1) << 8) | (val & 0xff);
+	return 12;
+}
+
+struct caldac_struct {
+	int n_chans;
+	int n_bits;
+	int (*packbits) (int, int, int *);
+};
+
+static struct caldac_struct caldacs[] = {
+	[mb88341] = {12, 8, pack_mb88341},
+	[dac8800] = {8, 8, pack_dac8800},
+	[dac8043] = {1, 12, pack_dac8043},
+	[ad8522] = {2, 12, pack_ad8522},
+	[ad8804] = {12, 8, pack_ad8804},
+	[ad8842] = {8, 8, pack_ad8842},
+	[ad8804_debug] = {16, 8, pack_ad8804},
+};
+
+static void ni_write_caldac(struct a4l_device * dev, int addr, int val)
+{
+	unsigned int loadbit = 0, bits = 0, bit, bitstring = 0;
+	int i;
+	int type;
+
+	if (devpriv->caldacs[addr] == val)
+		return;
+	devpriv->caldacs[addr] = val;
+
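+	/* find the caldac chip that owns this channel and pack its serial
+	   bitstring */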
+	for (i = 0; i < 3; i++) {
+		type = boardtype.caldac[i];
+		if (type == caldac_none)
+			break;
+		if (addr < caldacs[type].n_chans) {
+			bits = caldacs[type].packbits(addr, val, &bitstring);
+			loadbit = SerDacLd(i);
+			break;
+		}
+		addr -= caldacs[type].n_chans;
+	}
+
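+	/* shift the bitstring out MSB first, then pulse the chip's load
+	   strobe */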
+	for (bit = 1 << (bits - 1); bit; bit >>= 1) {
+		ni_writeb(((bit & bitstring) ? 0x02 : 0), Serial_Command);
+		a4l_udelay(1);
+		ni_writeb(1 | ((bit & bitstring) ? 0x02 : 0), Serial_Command);
+		a4l_udelay(1);
+	}
+	ni_writeb(loadbit, Serial_Command);
+	a4l_udelay(1);
+	ni_writeb(0, Serial_Command);
+}
+
+static void caldac_setup(struct a4l_device *dev, struct a4l_subdevice *subd)
+{
+	int i, j;
+	int n_dacs;
+	int n_chans = 0;
+	int n_bits;
+	int diffbits = 0;
+	int type;
+	int chan;
+
+	type = boardtype.caldac[0];
+	if (type == caldac_none)
+		return;
+	n_bits = caldacs[type].n_bits;
+	for (i = 0; i < 3; i++) {
+		type = boardtype.caldac[i];
+		if (type == caldac_none)
+			break;
+		if (caldacs[type].n_bits != n_bits)
+			diffbits = 1;
+		n_chans += caldacs[type].n_chans;
+	}
+	n_dacs = i;
+
+	if (diffbits) {
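+	/* caldac chips with different resolutions need a per-channel
+	   descriptor; otherwise a single global channel description is
+	   enough */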
+
+		if (n_chans > MAX_N_CALDACS) {
+			a4l_err(dev, "BUG! MAX_N_CALDACS too small\n");
+		}
+
+		subd->chan_desc = kmalloc(sizeof(struct a4l_channels_desc) +
+					  n_chans * sizeof(struct a4l_channel), GFP_KERNEL);
+
+		memset(subd->chan_desc,
+		       0,
+		       sizeof(struct a4l_channels_desc) + n_chans * sizeof(struct a4l_channel));
+
+		subd->chan_desc->length = n_chans;
+		subd->chan_desc->mode = A4L_CHAN_PERCHAN_CHANDESC;
+
+		chan = 0;
+		for (i = 0; i < n_dacs; i++) {
+			type = boardtype.caldac[i];
+			for (j = 0; j < caldacs[type].n_chans; j++) {
+
+				subd->chan_desc->chans[chan].nb_bits =
+					caldacs[type].n_bits;
+
+				chan++;
+			}
+		}
+
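+		/* initialize every caldac channel to mid-scale */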
+		for (chan = 0; chan < n_chans; chan++) {
+			unsigned long tmp =
+				(1 << subd->chan_desc->chans[chan].nb_bits) / 2;
+			ni_write_caldac(dev, chan, tmp);
+		}
+	} else {
+		subd->chan_desc = kmalloc(sizeof(struct a4l_channels_desc) +
+					  sizeof(struct a4l_channel), GFP_KERNEL);
+
+		memset(subd->chan_desc,
+		       0, sizeof(struct a4l_channels_desc) + sizeof(struct a4l_channel));
+
+		subd->chan_desc->length = n_chans;
+		subd->chan_desc->mode = A4L_CHAN_GLOBAL_CHANDESC;
+
+		type = boardtype.caldac[0];
+
+		subd->chan_desc->chans[0].nb_bits = caldacs[type].n_bits;
+
+		for (chan = 0; chan < n_chans; chan++)
+			ni_write_caldac(dev,
+					chan,
+					(1 << subd->chan_desc->chans[0].nb_bits) / 2);
+	}
+}
+
+static int ni_calib_insn_write(struct a4l_subdevice *subd, struct a4l_kernel_instruction *insn)
+{
+	struct a4l_device *dev = subd->dev;
+	uint16_t *data = (uint16_t *)insn->data;
+
+	ni_write_caldac(dev, CR_CHAN(insn->chan_desc), data[0]);
+	return 0;
+}
+
+static int ni_calib_insn_read(struct a4l_subdevice *subd, struct a4l_kernel_instruction *insn)
+{
+	struct a4l_device *dev = subd->dev;
+	uint16_t *data = (uint16_t *)insn->data;
+
+	data[0] = devpriv->caldacs[CR_CHAN(insn->chan_desc)];
+
+	return 0;
+}
+
+static int ni_gpct_insn_config(struct a4l_subdevice *subd, struct a4l_kernel_instruction *insn)
+{
+	struct ni_gpct *counter = (struct ni_gpct *)subd->priv;
+	return a4l_ni_tio_insn_config(counter, insn);
+}
+
+static int ni_gpct_insn_read(struct a4l_subdevice *subd, struct a4l_kernel_instruction *insn)
+{
+	struct ni_gpct *counter = (struct ni_gpct *)subd->priv;
+	return a4l_ni_tio_rinsn(counter, insn);
+}
+
+static int ni_gpct_insn_write(struct a4l_subdevice *subd, struct a4l_kernel_instruction *insn)
+{
+	struct ni_gpct *counter = (struct ni_gpct *)subd->priv;
+	return a4l_ni_tio_winsn(counter, insn);
+}
+
+#if (defined(CONFIG_XENO_DRIVERS_ANALOGY_NI_MITE) || \
+     defined(CONFIG_XENO_DRIVERS_ANALOGY_NI_MITE_MODULE))
+
+static int ni_gpct_cmd(struct a4l_subdevice *subd, struct a4l_cmd_desc *cmd)
+{
+	int retval;
+	struct a4l_device *dev = subd->dev;
+	struct ni_gpct *counter = (struct ni_gpct *)subd->priv;
+	struct mite_dma_descriptor_ring *ring;
+
+	retval = ni_request_gpct_mite_channel(dev,
+					      counter->counter_index,
+					      A4L_INPUT);
+	if (retval) {
+		a4l_err(dev,
+			"ni_gpct_cmd: "
+			"no dma channel available for use by counter\n");
+		return retval;
+	}
+
+	ring = devpriv->gpct_mite_ring[counter->counter_index];
+	retval = a4l_mite_buf_change(ring, subd);
+	if (retval) {
+		a4l_err(dev,
+			"ni_gpct_cmd: "
+			"dma ring configuration failed\n");
+		return retval;
+
+	}
+
+	a4l_ni_tio_acknowledge_and_confirm(counter, NULL, NULL, NULL, NULL);
+	ni_e_series_enable_second_irq(dev, counter->counter_index, 1);
+	retval = a4l_ni_tio_cmd(counter, cmd);
+
+	return retval;
+}
+
+static int ni_gpct_cmdtest(struct a4l_subdevice *subd, struct a4l_cmd_desc *cmd)
+{
+	struct ni_gpct *counter = (struct ni_gpct *)subd->priv;
+	return a4l_ni_tio_cmdtest(counter, cmd);
+}
+
+static void ni_gpct_cancel(struct a4l_subdevice *subd)
+{
+	struct a4l_device *dev = subd->dev;
+	struct ni_gpct *counter = (struct ni_gpct *)subd->priv;
+
+	a4l_ni_tio_cancel(counter);
+	ni_e_series_enable_second_irq(dev, counter->counter_index, 0);
+	ni_release_gpct_mite_channel(dev, counter->counter_index);
+}
+
+#endif /* CONFIG_XENO_DRIVERS_ANALOGY_NI_MITE */
+
+
+/*
+ *
+ *  Programmable Function Inputs
+ *
+ */
+
+static int ni_m_series_set_pfi_routing(struct a4l_device *dev,
+				       unsigned int chan, unsigned int source)
+{
+	unsigned int pfi_reg_index;
+	unsigned int array_offset;
+
+	if ((source & 0x1f) != source)
+		return -EINVAL;
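+	/* each PFI output select register routes three PFI channels,
+	   5 bits per source */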
+	pfi_reg_index = 1 + chan / 3;
+	array_offset = pfi_reg_index - 1;
+	devpriv->pfi_output_select_reg[array_offset] &=
+		~MSeries_PFI_Output_Select_Mask(chan);
+	devpriv->pfi_output_select_reg[array_offset] |=
+		MSeries_PFI_Output_Select_Bits(chan, source);
+	ni_writew(devpriv->pfi_output_select_reg[array_offset],
+		  M_Offset_PFI_Output_Select(pfi_reg_index));
+	return 2;
+}
+
+static unsigned int ni_old_get_pfi_routing(struct a4l_device *dev,
+					   unsigned int chan)
+{
+	/* pre-m-series boards have fixed signals on pfi pins */
+
+	switch (chan) {
+	case 0:
+		return NI_PFI_OUTPUT_AI_START1;
+		break;
+	case 1:
+		return NI_PFI_OUTPUT_AI_START2;
+		break;
+	case 2:
+		return NI_PFI_OUTPUT_AI_CONVERT;
+		break;
+	case 3:
+		return NI_PFI_OUTPUT_G_SRC1;
+		break;
+	case 4:
+		return NI_PFI_OUTPUT_G_GATE1;
+		break;
+	case 5:
+		return NI_PFI_OUTPUT_AO_UPDATE_N;
+		break;
+	case 6:
+		return NI_PFI_OUTPUT_AO_START1;
+		break;
+	case 7:
+		return NI_PFI_OUTPUT_AI_START_PULSE;
+		break;
+	case 8:
+		return NI_PFI_OUTPUT_G_SRC0;
+		break;
+	case 9:
+		return NI_PFI_OUTPUT_G_GATE0;
+		break;
+	default:
+		__a4l_err("%s: bug, unhandled case in switch.\n",
+			  __FUNCTION__);
+		break;
+	}
+	return 0;
+}
+
+static int ni_old_set_pfi_routing(struct a4l_device *dev,
+				  unsigned int chan, unsigned int source)
+{
+	/* pre-m-series boards have fixed signals on pfi pins */
+	if (source != ni_old_get_pfi_routing(dev, chan))
+		return -EINVAL;
+
+	return 2;
+}
+
+static int ni_set_pfi_routing(struct a4l_device *dev,
+			      unsigned int chan, unsigned int source)
+{
+	if (boardtype.reg_type & ni_reg_m_series_mask)
+		return ni_m_series_set_pfi_routing(dev, chan, source);
+	else
+		return ni_old_set_pfi_routing(dev, chan, source);
+}
+
+static unsigned int ni_m_series_get_pfi_routing(struct a4l_device *dev,
+						unsigned int chan)
+{
+	const unsigned int array_offset = chan / 3;
+	return MSeries_PFI_Output_Select_Source(chan,
+						devpriv->pfi_output_select_reg[array_offset]);
+}
+
+static unsigned int ni_get_pfi_routing(struct a4l_device *dev, unsigned int chan)
+{
+	if (boardtype.reg_type & ni_reg_m_series_mask)
+		return ni_m_series_get_pfi_routing(dev, chan);
+	else
+		return ni_old_get_pfi_routing(dev, chan);
+}
+
+static int ni_config_filter(struct a4l_device *dev,
+			    unsigned int pfi_channel, int filter)
+{
+	unsigned int bits;
+	if ((boardtype.reg_type & ni_reg_m_series_mask) == 0) {
+		return -ENOTSUPP;
+	}
+	bits = ni_readl(M_Offset_PFI_Filter);
+	bits &= ~MSeries_PFI_Filter_Select_Mask(pfi_channel);
+	bits |= MSeries_PFI_Filter_Select_Bits(pfi_channel, filter);
+	ni_writel(bits, M_Offset_PFI_Filter);
+	return 0;
+}
+
+static int ni_pfi_insn_bits(struct a4l_subdevice *subd, struct a4l_kernel_instruction *insn)
+{
+	struct a4l_device *dev = subd->dev;
+	uint16_t *data = (uint16_t *)insn->data;
+
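+	/* data[0] selects the channels to update, data[1] holds their new
+	   values; the current PFI input state is always read back */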
+	if (data[0]) {
+		devpriv->pfi_state &= ~data[0];
+		devpriv->pfi_state |= (data[0] & data[1]);
+		ni_writew(devpriv->pfi_state, M_Offset_PFI_DO);
+	}
+
+	data[1] = ni_readw(M_Offset_PFI_DI);
+
+	return 0;
+}
+
+static int ni_pfi_insn_config(struct a4l_subdevice *subd, struct a4l_kernel_instruction *insn)
+{
+	struct a4l_device *dev = subd->dev;
+	unsigned int chan, *data = (unsigned int *)insn->data;
+
+	if (insn->data_size < sizeof(unsigned int))
+		return -EINVAL;
+
+	chan = CR_CHAN(insn->chan_desc);
+
+	switch (data[0]) {
+	case A4L_OUTPUT:
+		ni_set_bits(dev, IO_Bidirection_Pin_Register, 1 << chan, 1);
+		break;
+	case A4L_INPUT:
+		ni_set_bits(dev, IO_Bidirection_Pin_Register, 1 << chan, 0);
+		break;
+	case A4L_INSN_CONFIG_DIO_QUERY:
+		data[1] = (devpriv->io_bidirection_pin_reg & (1 << chan)) ?
+			A4L_OUTPUT : A4L_INPUT;
+		return 0;
+	case A4L_INSN_CONFIG_SET_ROUTING:
+		return ni_set_pfi_routing(dev, chan, data[1]);
+	case A4L_INSN_CONFIG_GET_ROUTING:
+		data[1] = ni_get_pfi_routing(dev, chan);
+		break;
+	case A4L_INSN_CONFIG_FILTER:
+		return ni_config_filter(dev, chan, data[1]);
+	default:
+		return -EINVAL;
+	}
+	return 0;
+}
+
+/*
+ *
+ *  RTSI Bus Functions
+ *
+ */
+
+/* Find best multiplier/divider to try and get the PLL running at 80 MHz
+ * given an arbitrary frequency input clock */
+static int ni_mseries_get_pll_parameters(unsigned int reference_period_ns,
+					 unsigned int *freq_divider,
+					 unsigned int *freq_multiplier,
+					 unsigned int *actual_period_ns)
+{
+	unsigned div;
+	unsigned best_div = 1;
+	static const unsigned max_div = 0x10;
+	unsigned mult;
+	unsigned best_mult = 1;
+	static const unsigned max_mult = 0x100;
+	static const unsigned pico_per_nano = 1000;
+
+	const unsigned reference_picosec = reference_period_ns * pico_per_nano;
+	/* m-series wants the phase-locked loop to output 80 MHz, which is
+	 * divided by 4 to get the 20 MHz used by most timing clocks */
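+	/* 12500 ps = 12.5 ns, i.e. the period of the 80 MHz PLL output */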
+	static const unsigned target_picosec = 12500;
+	static const unsigned fudge_factor_80_to_20Mhz = 4;
+	int best_period_picosec = 0;
+	for (div = 1; div <= max_div; ++div) {
+		for (mult = 1; mult <= max_mult; ++mult) {
+			unsigned new_period_ps =
+				(reference_picosec * div) / mult;
+			if (abs(new_period_ps - target_picosec) <
+			    abs(best_period_picosec - target_picosec)) {
+				best_period_picosec = new_period_ps;
+				best_div = div;
+				best_mult = mult;
+			}
+		}
+	}
+	if (best_period_picosec == 0) {
+		__a4l_err("%s: bug, failed to find pll parameters\n",
+			  __FUNCTION__);
+		return -EIO;
+	}
+	*freq_divider = best_div;
+	*freq_multiplier = best_mult;
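+	/* Report the period of the 20 MHz timebase (PLL output divided by 4),
+	   rounded to the nearest nanosecond */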
+	*actual_period_ns =
+		(best_period_picosec * fudge_factor_80_to_20Mhz +
+		 (pico_per_nano / 2)) / pico_per_nano;
+	return 0;
+}
+
+static int ni_mseries_set_pll_master_clock(struct a4l_device * dev,
+					   unsigned int source,
+					   unsigned int period_ns)
+{
+	static const unsigned min_period_ns = 50;
+	static const unsigned max_period_ns = 1000;
+	static const unsigned timeout = 1000;
+	unsigned pll_control_bits;
+	unsigned freq_divider;
+	unsigned freq_multiplier;
+	unsigned i;
+	int retval;
+	if (source == NI_MIO_PLL_PXI10_CLOCK)
+		period_ns = 100;
+	/* These limits are somewhat arbitrary, but NI advertises a 1 to
+	   20 MHz range, so we'll use that */
+	if (period_ns < min_period_ns || period_ns > max_period_ns) {
+		a4l_err(dev,
+			"%s: you must specify an input clock period "
+			"between %i and %i ns "
+			"for the phase-locked loop.\n",
+			__FUNCTION__, min_period_ns, max_period_ns);
+		return -EINVAL;
+	}
+	devpriv->rtsi_trig_direction_reg &= ~Use_RTSI_Clock_Bit;
+	devpriv->stc_writew(dev, devpriv->rtsi_trig_direction_reg,
+			    RTSI_Trig_Direction_Register);
+	pll_control_bits =
+		MSeries_PLL_Enable_Bit | MSeries_PLL_VCO_Mode_75_150MHz_Bits;
+	devpriv->clock_and_fout2 |=
+		MSeries_Timebase1_Select_Bit | MSeries_Timebase3_Select_Bit;
+	devpriv->clock_and_fout2 &= ~MSeries_PLL_In_Source_Select_Mask;
+	switch (source) {
+	case NI_MIO_PLL_PXI_STAR_TRIGGER_CLOCK:
+		devpriv->clock_and_fout2 |=
+			MSeries_PLL_In_Source_Select_Star_Trigger_Bits;
+		retval = ni_mseries_get_pll_parameters(period_ns, &freq_divider,
+						       &freq_multiplier, &devpriv->clock_ns);
+		if (retval < 0)
+			return retval;
+		break;
+	case NI_MIO_PLL_PXI10_CLOCK:
+		/* pxi clock is 10MHz */
+		devpriv->clock_and_fout2 |=
+			MSeries_PLL_In_Source_Select_PXI_Clock10;
+		retval = ni_mseries_get_pll_parameters(period_ns, &freq_divider,
+						       &freq_multiplier, &devpriv->clock_ns);
+		if (retval < 0)
+			return retval;
+		break;
+	default:
+	{
+		unsigned rtsi_channel;
+		static const unsigned max_rtsi_channel = 7;
+		for (rtsi_channel = 0; rtsi_channel <= max_rtsi_channel;
+		     ++rtsi_channel) {
+			if (source ==
+			    NI_MIO_PLL_RTSI_CLOCK(rtsi_channel)) {
+				devpriv->clock_and_fout2 |=
+					MSeries_PLL_In_Source_Select_RTSI_Bits
+					(rtsi_channel);
+				break;
+			}
+		}
+		if (rtsi_channel > max_rtsi_channel)
+			return -EINVAL;
+		retval = ni_mseries_get_pll_parameters(period_ns,
+						       &freq_divider, &freq_multiplier,
+						       &devpriv->clock_ns);
+		if (retval < 0)
+			return retval;
+	}
+	break;
+	}
+	ni_writew(devpriv->clock_and_fout2, M_Offset_Clock_and_Fout2);
+	pll_control_bits |=
+		MSeries_PLL_Divisor_Bits(freq_divider) |
+		MSeries_PLL_Multiplier_Bits(freq_multiplier);
+	ni_writew(pll_control_bits, M_Offset_PLL_Control);
+	devpriv->clock_source = source;
+	/* It seems to typically take a few hundred microseconds for PLL to lock */
+	for (i = 0; i < timeout; ++i) {
+		if (ni_readw(M_Offset_PLL_Status) & MSeries_PLL_Locked_Bit) {
+			break;
+		}
+		udelay(1);
+	}
+	if (i == timeout) {
+		a4l_err(dev,
+			"%s: timed out waiting for PLL to lock "
+			"to reference clock source %i with period %i ns.\n",
+			__FUNCTION__, source, period_ns);
+		return -ETIMEDOUT;
+	}
+	return 3;
+}
+
+static int ni_set_master_clock(struct a4l_device *dev,
+			       unsigned int source, unsigned int period_ns)
+{
+	if (source == NI_MIO_INTERNAL_CLOCK) {
+		devpriv->rtsi_trig_direction_reg &= ~Use_RTSI_Clock_Bit;
+		devpriv->stc_writew(dev, devpriv->rtsi_trig_direction_reg,
+				    RTSI_Trig_Direction_Register);
+		devpriv->clock_ns = TIMEBASE_1_NS;
+		if (boardtype.reg_type & ni_reg_m_series_mask) {
+			devpriv->clock_and_fout2 &=
+				~(MSeries_Timebase1_Select_Bit |
+				  MSeries_Timebase3_Select_Bit);
+			ni_writew(devpriv->clock_and_fout2,
+				  M_Offset_Clock_and_Fout2);
+			ni_writew(0, M_Offset_PLL_Control);
+		}
+		devpriv->clock_source = source;
+	} else {
+		if (boardtype.reg_type & ni_reg_m_series_mask) {
+			return ni_mseries_set_pll_master_clock(dev, source,
+							       period_ns);
+		} else {
+			if (source == NI_MIO_RTSI_CLOCK) {
+				devpriv->rtsi_trig_direction_reg |=
+					Use_RTSI_Clock_Bit;
+				devpriv->stc_writew(dev,
+						    devpriv->rtsi_trig_direction_reg,
+						    RTSI_Trig_Direction_Register);
+				if (devpriv->clock_ns == 0) {
+					a4l_err(dev,
+						"%s: we don't handle an "
+						"unspecified clock period "
+						"correctly yet, returning error.\n",
+						__FUNCTION__);
+					return -EINVAL;
+				} else {
+					devpriv->clock_ns = period_ns;
+				}
+				devpriv->clock_source = source;
+			} else
+				return -EINVAL;
+		}
+	}
+	return 3;
+}
+
+static void ni_rtsi_init(struct a4l_device * dev)
+{
+	/* Initialise the RTSI bus signal switch to a default state */
+
+	/* Set clock mode to internal */
+	devpriv->clock_and_fout2 = MSeries_RTSI_10MHz_Bit;
+	if (ni_set_master_clock(dev, NI_MIO_INTERNAL_CLOCK, 0) < 0) {
+		a4l_err(dev, "ni_set_master_clock failed, bug?\n");
+	}
+
+	/* Default internal lines routing to RTSI bus lines */
+	devpriv->rtsi_trig_a_output_reg =
+		RTSI_Trig_Output_Bits(0, NI_RTSI_OUTPUT_ADR_START1) |
+		RTSI_Trig_Output_Bits(1, NI_RTSI_OUTPUT_ADR_START2) |
+		RTSI_Trig_Output_Bits(2, NI_RTSI_OUTPUT_SCLKG) |
+		RTSI_Trig_Output_Bits(3, NI_RTSI_OUTPUT_DACUPDN);
+	devpriv->stc_writew(dev, devpriv->rtsi_trig_a_output_reg,
+			    RTSI_Trig_A_Output_Register);
+	devpriv->rtsi_trig_b_output_reg =
+		RTSI_Trig_Output_Bits(4, NI_RTSI_OUTPUT_DA_START1) |
+		RTSI_Trig_Output_Bits(5, NI_RTSI_OUTPUT_G_SRC0) |
+		RTSI_Trig_Output_Bits(6, NI_RTSI_OUTPUT_G_GATE0);
+
+	if (boardtype.reg_type & ni_reg_m_series_mask)
+		devpriv->rtsi_trig_b_output_reg |=
+			RTSI_Trig_Output_Bits(7, NI_RTSI_OUTPUT_RTSI_OSC);
+	devpriv->stc_writew(dev, devpriv->rtsi_trig_b_output_reg,
+			    RTSI_Trig_B_Output_Register);
+}
+
+int a4l_ni_E_init(struct a4l_device *dev)
+{
+	int ret;
+	unsigned int j, counter_variant;
+	struct a4l_subdevice *subd;
+
+	if (boardtype.n_aochan > MAX_N_AO_CHAN) {
+		a4l_err(dev, "bug! boardtype.n_aochan > MAX_N_AO_CHAN\n");
+		return -EINVAL;
+	}
+
+	/* analog input subdevice */
+
+	a4l_dbg(1, drv_dbg, dev, "mio_common: starting attach procedure...\n");
+
+	subd = a4l_alloc_subd(0, NULL);
+	if(subd == NULL)
+		return -ENOMEM;
+
+	a4l_dbg(1, drv_dbg, dev, "mio_common: registering AI subdevice...\n");
+
+	if (boardtype.n_adchan) {
+
+		a4l_dbg(1, drv_dbg, dev,
+			"mio_common: AI: %d channels\n", boardtype.n_adchan);
+
+		subd->flags = A4L_SUBD_AI | A4L_SUBD_CMD | A4L_SUBD_MMAP;
+		subd->rng_desc = ni_range_lkup[boardtype.gainlkup];
+
+		subd->chan_desc = kmalloc(sizeof(struct a4l_channels_desc) +
+					  sizeof(struct a4l_channel), GFP_KERNEL);
+
+		subd->chan_desc->mode = A4L_CHAN_GLOBAL_CHANDESC;
+		subd->chan_desc->length = boardtype.n_adchan;
+		subd->chan_desc->chans[0].flags = A4L_CHAN_AREF_DIFF;
+		if (boardtype.reg_type != ni_reg_611x)
+			subd->chan_desc->chans[0].flags |= A4L_CHAN_AREF_GROUND |
+				A4L_CHAN_AREF_COMMON | A4L_CHAN_AREF_OTHER;
+		subd->chan_desc->chans[0].nb_bits = boardtype.adbits;
+
+		subd->insn_read = ni_ai_insn_read;
+		subd->insn_config = ni_ai_insn_config;
+		subd->do_cmdtest = ni_ai_cmdtest;
+		subd->do_cmd = ni_ai_cmd;
+		subd->cancel = ni_ai_reset;
+		subd->trigger = ni_ai_inttrig;
+
+		subd->munge = (boardtype.adbits > 16) ?
+			ni_ai_munge32 : ni_ai_munge16;
+
+		subd->cmd_mask = &mio_ai_cmd_mask;
+	} else {
+		a4l_dbg(1, drv_dbg, dev,
+			"mio_common: AI subdevice not present\n");
+		subd->flags = A4L_SUBD_UNUSED;
+	}
+
+	ret = a4l_add_subd(dev, subd);
+	if(ret != NI_AI_SUBDEV)
+		return ret;
+
+	a4l_dbg(1, drv_dbg, dev, "mio_common: AI subdevice registered\n");
+
+	subd = a4l_alloc_subd(0, NULL);
+	if(subd == NULL)
+		return -ENOMEM;
+
+	a4l_dbg(1, drv_dbg, dev, "mio_common: registering AO subdevice...\n");
+
+	/* analog output subdevice */
+	if (boardtype.n_aochan) {
+
+		a4l_dbg(1, drv_dbg, dev,
+			"mio_common: AO: %d channels\n", boardtype.n_aochan);
+
+		subd->flags = A4L_SUBD_AO;
+		subd->chan_desc = kmalloc(sizeof(struct a4l_channels_desc) +
+					  sizeof(struct a4l_channel), GFP_KERNEL);
+
+		subd->chan_desc->mode = A4L_CHAN_GLOBAL_CHANDESC;
+		subd->chan_desc->length = boardtype.n_aochan;
+		subd->chan_desc->chans[0].flags = A4L_CHAN_AREF_GROUND;
+		subd->chan_desc->chans[0].nb_bits = boardtype.aobits;
+
+		subd->rng_desc = boardtype.ao_range_table;
+
+		subd->insn_read = ni_ao_insn_read;
+		if (boardtype.reg_type & ni_reg_6xxx_mask)
+			subd->insn_write = &ni_ao_insn_write_671x;
+		else
+			subd->insn_write = &ni_ao_insn_write;
+
+
+		if (boardtype.ao_fifo_depth) {
+			subd->flags |= A4L_SUBD_CMD | A4L_SUBD_MMAP;
+			subd->do_cmd = &ni_ao_cmd;
+			subd->cmd_mask = &mio_ao_cmd_mask;
+			subd->do_cmdtest = &ni_ao_cmdtest;
+			subd->trigger = ni_ao_inttrig;
+			if ((boardtype.reg_type & ni_reg_m_series_mask) == 0)
+				subd->munge = &ni_ao_munge;
+		}
+
+		subd->cancel = &ni_ao_reset;
+
+	} else {
+		a4l_dbg(1, drv_dbg, dev,
+			"mio_common: AO subdevice not present\n");
+		subd->flags = A4L_SUBD_UNUSED;
+	}
+
+	ret = a4l_add_subd(dev, subd);
+	if(ret != NI_AO_SUBDEV)
+		return ret;
+
+	a4l_dbg(1, drv_dbg, dev, "mio_common: AO subdevice registered\n");
+
+	if ((boardtype.reg_type & ni_reg_67xx_mask))
+		init_ao_67xx(dev);
+
+	/* digital i/o subdevice */
+
+	subd = a4l_alloc_subd(0, NULL);
+	if(subd == NULL)
+		return -ENOMEM;
+
+	a4l_dbg(1, drv_dbg, dev, "mio_common: registering DIO subdevice...\n");
+	a4l_dbg(1, drv_dbg, dev,
+		"mio_common: DIO: %d channels\n",
+		boardtype.num_p0_dio_channels);
+
+	subd->flags = A4L_SUBD_DIO;
+
+	subd->chan_desc = kmalloc(sizeof(struct a4l_channels_desc) +
+				  sizeof(struct a4l_channel), GFP_KERNEL);
+	subd->chan_desc->mode = A4L_CHAN_GLOBAL_CHANDESC;
+	subd->chan_desc->length = boardtype.num_p0_dio_channels;
+	subd->chan_desc->chans[0].flags = A4L_CHAN_AREF_GROUND;
+	subd->chan_desc->chans[0].nb_bits = 1;
+	devpriv->io_bits = 0; /* all bits input */
+
+	subd->rng_desc = &range_digital;
+
+	if (boardtype.reg_type & ni_reg_m_series_mask) {
+
+		if (subd->chan_desc->length == 8)
+			subd->insn_bits = ni_m_series_dio_insn_bits_8;
+		else
+			subd->insn_bits = ni_m_series_dio_insn_bits_32;
+
+		subd->insn_config = ni_m_series_dio_insn_config;
+
+#if (defined(CONFIG_XENO_DRIVERS_ANALOGY_NI_MITE) || \
+     defined(CONFIG_XENO_DRIVERS_ANALOGY_NI_MITE_MODULE))
+
+		a4l_dbg(1, drv_dbg, dev,
+			"mio_common: DIO: command feature available\n");
+
+		subd->flags |= A4L_SUBD_CMD;
+		subd->do_cmd = ni_cdio_cmd;
+		subd->do_cmdtest = ni_cdio_cmdtest;
+		subd->cmd_mask = &mio_dio_cmd_mask;
+		subd->cancel = ni_cdio_cancel;
+		subd->trigger = ni_cdo_inttrig;
+
+#endif /* CONFIG_XENO_DRIVERS_ANALOGY_NI_MITE */
+
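+		/* Reset the CDI/CDO (correlated digital I/O) engines and set
+		   all P0 lines as inputs */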
+		ni_writel(CDO_Reset_Bit | CDI_Reset_Bit, M_Offset_CDIO_Command);
+		ni_writel(devpriv->io_bits, M_Offset_DIO_Direction);
+	} else {
+
+		subd->insn_bits = ni_dio_insn_bits;
+		subd->insn_config = ni_dio_insn_config;
+		devpriv->dio_control = DIO_Pins_Dir(devpriv->io_bits);
+		ni_writew(devpriv->dio_control, DIO_Control_Register);
+	}
+
+	ret = a4l_add_subd(dev, subd);
+	if(ret != NI_DIO_SUBDEV)
+		return ret;
+
+	a4l_dbg(1, drv_dbg, dev, "mio_common: DIO subdevice registered\n");
+
+	/* 8255 device */
+	subd = a4l_alloc_subd(sizeof(subd_8255_t), NULL);
+	if(subd == NULL)
+		return -ENOMEM;
+
+	a4l_dbg(1, drv_dbg, dev, "mio_common: registering 8255 subdevice...\n");
+
+	if (boardtype.has_8255) {
+		devpriv->subd_8255.cb_arg = (unsigned long)dev;
+		devpriv->subd_8255.cb_func = ni_8255_callback;
+		a4l_subdev_8255_init(subd);
+	} else {
+		a4l_dbg(1, drv_dbg, dev,
+			"mio_common: 8255 subdevice not present\n");
+		subd->flags = A4L_SUBD_UNUSED;
+	}
+
+	ret = a4l_add_subd(dev, subd);
+	if(ret != NI_8255_DIO_SUBDEV)
+		return ret;
+
+	a4l_dbg(1, drv_dbg, dev, "mio_common: 8255 subdevice registered\n");
+
+	/* formerly general purpose counter/timer device, but no longer used */
+	subd = a4l_alloc_subd(0, NULL);
+	if(subd == NULL)
+		return -ENOMEM;
+
+	subd->flags = A4L_SUBD_UNUSED;
+	ret = a4l_add_subd(dev, subd);
+	if(ret != NI_UNUSED_SUBDEV)
+		return ret;
+
+	/* calibration subdevice -- ai and ao */
+	subd = a4l_alloc_subd(0, NULL);
+	if(subd == NULL)
+		return -ENOMEM;
+
+	a4l_dbg(1, drv_dbg, dev, "mio_common: registering calib subdevice...\n");
+
+	subd->flags = A4L_SUBD_CALIB;
+	if (boardtype.reg_type & ni_reg_m_series_mask) {
+		/* internal PWM analog output
+		   used for AI nonlinearity calibration */
+		a4l_dbg(1, drv_dbg, dev,
+			"mio_common: calib: M series calibration\n");
+		subd->insn_config = ni_m_series_pwm_config;
+		ni_writel(0x0, M_Offset_Cal_PWM);
+	} else if (boardtype.reg_type == ni_reg_6143) {
+		/* internal PWM analog output
+		   used for AI nonlinearity calibration */
+		a4l_dbg(1, drv_dbg, dev,
+			"mio_common: calib: 6143 calibration\n");
+		subd->insn_config = ni_6143_pwm_config;
+	} else {
+		a4l_dbg(1, drv_dbg, dev,
+			"mio_common: calib: common calibration\n");
+		subd->insn_read = ni_calib_insn_read;
+		subd->insn_write = ni_calib_insn_write;
+		caldac_setup(dev, subd);
+	}
+
+	ret = a4l_add_subd(dev, subd);
+	if(ret != NI_CALIBRATION_SUBDEV)
+		return ret;
+
+	a4l_dbg(1, drv_dbg, dev, "mio_common: calib subdevice registered\n");
+
+	/* EEPROM */
+	subd = a4l_alloc_subd(0, NULL);
+	if(subd == NULL)
+		return -ENOMEM;
+
+	a4l_dbg(1, drv_dbg, dev,
+		"mio_common: registering EEPROM subdevice...\n");
+
+	subd->flags = A4L_SUBD_MEMORY;
+
+	subd->chan_desc = kmalloc(sizeof(struct a4l_channels_desc) +
+				  sizeof(struct a4l_channel), GFP_KERNEL);
+	subd->chan_desc->mode = A4L_CHAN_GLOBAL_CHANDESC;
+	subd->chan_desc->chans[0].flags = 0;
+	subd->chan_desc->chans[0].nb_bits = 8;
+
+	if (boardtype.reg_type & ni_reg_m_series_mask) {
+		subd->chan_desc->length = M_SERIES_EEPROM_SIZE;
+		subd->insn_read = ni_m_series_eeprom_insn_read;
+	} else {
+		subd->chan_desc->length = 512;
+		subd->insn_read = ni_eeprom_insn_read;
+	}
+
+	a4l_dbg(1, drv_dbg, dev,
+		"mio_common: EEPROM: size = %lu\n", subd->chan_desc->length);
+
+	ret = a4l_add_subd(dev, subd);
+	if(ret != NI_EEPROM_SUBDEV)
+		return ret;
+
+	a4l_dbg(1, drv_dbg, dev, "mio_common: EEPROM subdevice registered\n");
+
+	/* PFI */
+	subd = a4l_alloc_subd(0, NULL);
+	if(subd == NULL)
+		return -ENOMEM;
+
+	a4l_dbg(1, drv_dbg, dev,
+		"mio_common: registering PFI(DIO) subdevice...\n");
+
+	subd->flags = A4L_SUBD_DIO;
+
+	subd->chan_desc = kmalloc(sizeof(struct a4l_channels_desc) +
+				  sizeof(struct a4l_channel), GFP_KERNEL);
+	subd->chan_desc->mode = A4L_CHAN_GLOBAL_CHANDESC;
+	subd->chan_desc->chans[0].flags = 0;
+	subd->chan_desc->chans[0].nb_bits = 1;
+
+	if (boardtype.reg_type & ni_reg_m_series_mask) {
+		unsigned int i;
+		subd->chan_desc->length = 16;
+		ni_writew(devpriv->dio_state, M_Offset_PFI_DO);
+		for (i = 0; i < NUM_PFI_OUTPUT_SELECT_REGS; ++i) {
+			ni_writew(devpriv->pfi_output_select_reg[i],
+				  M_Offset_PFI_Output_Select(i + 1));
+		}
+	} else
+		subd->chan_desc->length = 10;
+
+	a4l_dbg(1, drv_dbg, dev,
+		"mio_common: PFI: %lu bits...\n", subd->chan_desc->length);
+
+	if (boardtype.reg_type & ni_reg_m_series_mask) {
+		subd->insn_bits = ni_pfi_insn_bits;
+	}
+
+	subd->insn_config = ni_pfi_insn_config;
+	ni_set_bits(dev, IO_Bidirection_Pin_Register, ~0, 0);
+
+	ret = a4l_add_subd(dev, subd);
+	if(ret != NI_PFI_DIO_SUBDEV)
+		return ret;
+
+	a4l_dbg(1, drv_dbg, dev, "mio_common: PFI subdevice registered\n");
+
+	/* cs5529 calibration adc */
+	subd = a4l_alloc_subd(0, NULL);
+	if(subd == NULL)
+		return -ENOMEM;
+
+#if 0 /* TODO: add subdevices callbacks */
+	subd->flags = A4L_SUBD_AI;
+
+	if (boardtype.reg_type & ni_reg_67xx_mask) {
+
+		subd->chan_desc = kmalloc(sizeof(struct a4l_channels_desc) +
+					  sizeof(struct a4l_channel), GFP_KERNEL);
+		subd->chan_desc->mode = A4L_CHAN_GLOBAL_CHANDESC;
+		subd->chan_desc->length = boardtype.n_aochan;
+		subd->chan_desc->chans[0].flags = 0;
+		subd->chan_desc->chans[0].nb_bits = 16;
+
+		/* one channel for each analog output channel */
+		subd->rng_desc = &a4l_range_unknown;	/* XXX */
+		subd->insn_read = cs5529_ai_insn_read;
+		init_cs5529(dev);
+	} else
+#endif /* TODO: add subdevices callbacks */
+		subd->flags = A4L_SUBD_UNUSED;
+
+	ret = a4l_add_subd(dev, subd);
+	if(ret != NI_CS5529_CALIBRATION_SUBDEV)
+		return ret;
+
+	/* Serial */
+	subd = a4l_alloc_subd(0, NULL);
+	if(subd == NULL)
+		return -ENOMEM;
+
+	a4l_dbg(1, drv_dbg, dev,
+		"mio_common: registering serial subdevice...\n");
+
+	subd->flags = A4L_SUBD_SERIAL;
+
+	subd->chan_desc = kmalloc(sizeof(struct a4l_channels_desc) +
+				  sizeof(struct a4l_channel), GFP_KERNEL);
+	subd->chan_desc->mode = A4L_CHAN_GLOBAL_CHANDESC;
+	subd->chan_desc->length = 1;
+	subd->chan_desc->chans[0].flags = 0;
+	subd->chan_desc->chans[0].nb_bits = 8;
+
+	subd->insn_config = ni_serial_insn_config;
+
+	devpriv->serial_interval_ns = 0;
+	devpriv->serial_hw_mode = 0;
+
+	ret = a4l_add_subd(dev, subd);
+	if(ret != NI_SERIAL_SUBDEV)
+		return ret;
+
+	a4l_dbg(1, drv_dbg, dev, "mio_common: serial subdevice registered\n");
+
+	/* RTSI */
+	subd = a4l_alloc_subd(0, NULL);
+	if(subd == NULL)
+		return -ENOMEM;
+
+#if 1 /* TODO: add RTSI subdevice */
+	subd->flags = A4L_SUBD_UNUSED;
+	ni_rtsi_init(dev);
+
+#else /* TODO: add RTSI subdevice */
+	subd->flags = A4L_SUBD_DIO;
+
+	subd->chan_desc = kmalloc(sizeof(struct a4l_channels_desc) +
+				  sizeof(struct a4l_channel), GFP_KERNEL);
+	subd->chan_desc->mode = A4L_CHAN_GLOBAL_CHANDESC;
+	subd->chan_desc->length = 8;
+	subd->chan_desc->chans[0].flags = 0;
+	subd->chan_desc->chans[0].nb_bits = 1;
+
+	subd->insn_bits = ni_rtsi_insn_bits;
+	subd->insn_config = ni_rtsi_insn_config;
+	ni_rtsi_init(dev);
+
+#endif /* TODO: add RTSI subdevice */
+
+	ret = a4l_add_subd(dev, subd);
+	if(ret != NI_RTSI_SUBDEV)
+		return ret;
+
+	if (boardtype.reg_type & ni_reg_m_series_mask) {
+		counter_variant = ni_gpct_variant_m_series;
+	} else {
+		counter_variant = ni_gpct_variant_e_series;
+	}
+	devpriv->counter_dev =
+		a4l_ni_gpct_device_construct(dev,
+					     &ni_gpct_write_register,
+					     &ni_gpct_read_register,
+					     counter_variant, NUM_GPCT);
+
+	/* General purpose counters */
+	for (j = 0; j < NUM_GPCT; ++j) {
+		struct ni_gpct *counter;
+
+		subd = a4l_alloc_subd(sizeof(struct ni_gpct), NULL);
+		if(subd == NULL)
+			return -ENOMEM;
+
+		a4l_dbg(1, drv_dbg, dev,
+			"mio_common: registering GPCT[%d] subdevice...\n", j);
+
+		subd->flags = A4L_SUBD_COUNTER;
+
+		subd->chan_desc = kmalloc(sizeof(struct a4l_channels_desc) +
+					  sizeof(struct a4l_channel), GFP_KERNEL);
+		subd->chan_desc->mode = A4L_CHAN_GLOBAL_CHANDESC;
+		subd->chan_desc->length = 3;
+		subd->chan_desc->chans[0].flags = 0;
+
+		if (boardtype.reg_type & ni_reg_m_series_mask)
+			subd->chan_desc->chans[0].nb_bits = 32;
+		else
+			subd->chan_desc->chans[0].nb_bits = 24;
+
+		a4l_dbg(1, drv_dbg, dev,
+			"mio_common: GPCT[%d]: %lu bits\n",
+			j, subd->chan_desc->chans[0].nb_bits);
+
+		subd->insn_read = ni_gpct_insn_read;
+		subd->insn_write = ni_gpct_insn_write;
+		subd->insn_config = ni_gpct_insn_config;
+
+#if (defined(CONFIG_XENO_DRIVERS_ANALOGY_NI_MITE) || \
+     defined(CONFIG_XENO_DRIVERS_ANALOGY_NI_MITE_MODULE))
+
+		a4l_dbg(1, drv_dbg, dev,
+			"mio_common: GPCT[%d]: command feature available\n", j);
+		subd->flags |= A4L_SUBD_CMD;
+		subd->cmd_mask = &a4l_ni_tio_cmd_mask;
+		subd->do_cmd = ni_gpct_cmd;
+		subd->do_cmdtest = ni_gpct_cmdtest;
+		subd->cancel = ni_gpct_cancel;
+#endif /* CONFIG_XENO_DRIVERS_ANALOGY_NI_MITE */
+
+		counter = (struct ni_gpct *)subd->priv;
+		rtdm_lock_init(&counter->lock);
+		counter->chip_index = 0;
+		counter->counter_index = j;
+		counter->counter_dev = devpriv->counter_dev;
+		devpriv->counter_dev->counters[j] = counter;
+
+		a4l_ni_tio_init_counter(counter);
+
+		ret = a4l_add_subd(dev, subd);
+		if(ret != NI_GPCT_SUBDEV(j))
+			return ret;
+
+		a4l_dbg(1, drv_dbg, dev,
+			"mio_common: GPCT[%d] subdevice registered\n", j);
+	}
+
+	/* Frequency output */
+	subd = a4l_alloc_subd(0, NULL);
+	if(subd == NULL)
+		return -ENOMEM;
+
+	a4l_dbg(1, drv_dbg, dev,
+		"mio_common: registering counter subdevice...\n");
+
+	subd->flags = A4L_SUBD_COUNTER;
+
+	subd->chan_desc = kmalloc(sizeof(struct a4l_channels_desc) +
+				  sizeof(struct a4l_channel), GFP_KERNEL);
+	subd->chan_desc->mode = A4L_CHAN_GLOBAL_CHANDESC;
+	subd->chan_desc->length = 1;
+	subd->chan_desc->chans[0].flags = 0;
+	subd->chan_desc->chans[0].nb_bits = 4;
+
+	subd->insn_read = ni_freq_out_insn_read;
+	subd->insn_write = ni_freq_out_insn_write;
+	subd->insn_config = ni_freq_out_insn_config;
+
+	ret = a4l_add_subd(dev, subd);
+	if(ret != NI_FREQ_OUT_SUBDEV)
+		return ret;
+
+	a4l_dbg(1, drv_dbg, dev,
+		"mio_common: counter subdevice registered\n");
+
+	a4l_dbg(1, drv_dbg, dev, "mio_common: initializing AI...\n");
+
+	/* ai configuration */
+	ni_ai_reset(a4l_get_subd(dev, NI_AI_SUBDEV));
+	if ((boardtype.reg_type & ni_reg_6xxx_mask) == 0) {
+		/* BEAM: is this needed for PCI-6143? */
+		devpriv->clock_and_fout =
+			Slow_Internal_Time_Divide_By_2 |
+			Slow_Internal_Timebase |
+			Clock_To_Board_Divide_By_2 |
+			Clock_To_Board |
+			AI_Output_Divide_By_2 | AO_Output_Divide_By_2;
+	} else {
+		devpriv->clock_and_fout =
+			Slow_Internal_Time_Divide_By_2 |
+			Slow_Internal_Timebase |
+			Clock_To_Board_Divide_By_2 | Clock_To_Board;
+	}
+	devpriv->stc_writew(dev, devpriv->clock_and_fout,
+			    Clock_and_FOUT_Register);
+
+	a4l_dbg(1, drv_dbg, dev, "mio_common: AI initialization OK\n");
+
+	a4l_dbg(1, drv_dbg, dev, "mio_common: initializing AO...\n");
+
+	/* analog output configuration */
+	ni_ao_reset(a4l_get_subd(dev, NI_AO_SUBDEV));
+
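+	/* Enable interrupt groups A and B and route both to the configured
+	   IRQ pin, using the board-specific polarity */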
+	if (a4l_get_irq(dev) != A4L_IRQ_UNUSED) {
+		devpriv->stc_writew(dev,
+				    (devpriv->irq_polarity ? Interrupt_Output_Polarity : 0) |
+				    (Interrupt_Output_On_3_Pins & 0) | Interrupt_A_Enable |
+				    Interrupt_B_Enable |
+				    Interrupt_A_Output_Select(devpriv->irq_pin) |
+				    Interrupt_B_Output_Select(devpriv->irq_pin),
+				    Interrupt_Control_Register);
+	}
+
+	a4l_dbg(1, drv_dbg, dev, "mio_common: AO initialization OK\n");
+
+	/* DMA setup */
+
+	a4l_dbg(1, drv_dbg, dev, "mio_common: DMA setup\n");
+
+	ni_writeb(devpriv->ai_ao_select_reg, AI_AO_Select);
+	ni_writeb(devpriv->g0_g1_select_reg, G0_G1_Select);
+
+	if (boardtype.reg_type & ni_reg_6xxx_mask) {
+		ni_writeb(0, Magic_611x);
+	} else if (boardtype.reg_type & ni_reg_m_series_mask) {
+		int channel;
+		for (channel = 0; channel < boardtype.n_aochan; ++channel) {
+			ni_writeb(0xf, M_Offset_AO_Waveform_Order(channel));
+			ni_writeb(0x0,
+				  M_Offset_AO_Reference_Attenuation(channel));
+		}
+		ni_writeb(0x0, M_Offset_AO_Calibration);
+	}
+
+	a4l_dbg(1, drv_dbg, dev, "mio_common: attach procedure complete\n");
+
+	return 0;
+}
+
+MODULE_DESCRIPTION("Analogy support for NI DAQ-STC based boards");
+MODULE_LICENSE("GPL");
+
+EXPORT_SYMBOL_GPL(a4l_range_ni_E_ai);
+EXPORT_SYMBOL_GPL(a4l_range_ni_E_ai_limited);
+EXPORT_SYMBOL_GPL(a4l_range_ni_E_ai_limited14);
+EXPORT_SYMBOL_GPL(a4l_range_ni_E_ai_bipolar4);
+EXPORT_SYMBOL_GPL(a4l_range_ni_E_ai_611x);
+EXPORT_SYMBOL_GPL(a4l_range_ni_M_ai_622x);
+EXPORT_SYMBOL_GPL(a4l_range_ni_M_ai_628x);
+EXPORT_SYMBOL_GPL(a4l_range_ni_S_ai_6143);
+EXPORT_SYMBOL_GPL(a4l_range_ni_E_ao_ext);
+EXPORT_SYMBOL_GPL(a4l_ni_E_interrupt);
+EXPORT_SYMBOL_GPL(a4l_ni_E_init);
+++ linux-patched/drivers/xenomai/analogy/national_instruments/mite.c	2022-03-21 12:58:31.123872052 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/drivers/xenomai/analogy/national_instruments/mite.h	1970-01-01 01:00:00.000000000 +0100
+/*
+ * Hardware driver for NI Mite PCI interface chip
+ *
+ * Copyright (C) 1999 David A. Schleef <ds@schleef.org>
+ *
+ * This code is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published
+ * by the Free Software Foundation; either version 2 of the License,
+ * or (at your option) any later version.
+ *
+ * This code is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Xenomai; if not, write to the Free Software Foundation,
+ * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ * The NI Mite driver was originally written by Tomasz Motylewski
+ * <...>, and ported to comedi by ds.
+ *
+ * References for specifications:
+ *
+ * 321747b.pdf  Register Level Programmer Manual (obsolete)
+ * 321747c.pdf  Register Level Programmer Manual (new)
+ * DAQ-STC reference manual
+ *
+ * Other possibly relevant info:
+ *
+ * 320517c.pdf  User manual (obsolete)
+ * 320517f.pdf  User manual (new)
+ * 320889a.pdf  delete
+ * 320906c.pdf  maximum signal ratings
+ * 321066a.pdf  about 16x
+ * 321791a.pdf  discontinuation of at-mio-16e-10 rev. c
+ * 321808a.pdf  about at-mio-16e-10 rev P
+ * 321837a.pdf  discontinuation of at-mio-16de-10 rev d
+ * 321838a.pdf  about at-mio-16de-10 rev N
+ *
+ * ISSUES:
+ */
+
+#include <linux/module.h>
+#include "mite.h"
+
+#ifdef CONFIG_DEBUG_MITE
+#define MDPRINTK(fmt, args...) rtdm_printk(fmt, ##args)
+#else /* !CONFIG_DEBUG_MITE */
+#define MDPRINTK(fmt, args...)
+#endif /* CONFIG_DEBUG_MITE */
+
+static LIST_HEAD(mite_devices);
+
+static struct pci_device_id mite_id[] = {
+	{PCI_DEVICE(PCI_VENDOR_ID_NATINST, PCI_ANY_ID), },
+	{0, }
+};
+
+static int mite_probe(struct pci_dev *dev, const struct pci_device_id *id)
+{
+	int i, err = 0;
+	struct mite_struct *mite;
+
+	mite = kmalloc(sizeof(struct mite_struct), GFP_KERNEL);
+	if(mite == NULL)
+		return -ENOMEM;
+
+	memset(mite, 0, sizeof(struct mite_struct));
+
+	rtdm_lock_init(&mite->lock);
+
+	mite->pcidev = dev;
+	if (pci_enable_device(dev) < 0) {
+		__a4l_err("error enabling mite\n");
+		err = -EIO;
+		goto out;
+	}
+
+	for(i = 0; i < MAX_MITE_DMA_CHANNELS; i++) {
+		mite->channels[i].mite = mite;
+		mite->channels[i].channel = i;
+		mite->channels[i].done = 1;
+	}
+
+	list_add(&mite->list, &mite_devices);
+
+out:
+	if (err < 0)
+		kfree(mite);
+
+	return err;
+}
+
+static void mite_remove(struct pci_dev *dev)
+{
+	struct list_head *this;
+
+	list_for_each(this, &mite_devices) {
+		struct mite_struct *mite =
+			list_entry(this, struct mite_struct, list);
+
+		if(mite->pcidev == dev) {
+			list_del(this);
+			kfree(mite);
+			break;
+		}
+	}
+}
+
+static struct pci_driver mite_driver = {
+	.name = "analogy_mite",
+	.id_table = mite_id,
+	.probe = mite_probe,
+	.remove = mite_remove,
+};
+
+int a4l_mite_setup(struct mite_struct *mite, int use_iodwbsr_1)
+{
+	unsigned long length;
+	resource_size_t addr;
+	int i;
+	u32 csigr_bits;
+	unsigned unknown_dma_burst_bits;
+
+	__a4l_dbg(1, drv_dbg, "starting setup...\n");
+
+	pci_set_master(mite->pcidev);
+
+	if (pci_request_regions(mite->pcidev, "mite")) {
+		__a4l_err("failed to request mite io regions\n");
+		return -EIO;
+	}
+
+	/* The PCI BAR0 is the Mite */
+	addr = pci_resource_start(mite->pcidev, 0);
+	length = pci_resource_len(mite->pcidev, 0);
+	mite->mite_phys_addr = addr;
+	mite->mite_io_addr = ioremap(addr, length);
+	if (!mite->mite_io_addr) {
+		__a4l_err("failed to remap mite io memory address\n");
+		pci_release_regions(mite->pcidev);
+		return -ENOMEM;
+	}
+
+	__a4l_dbg(1, drv_dbg, "bar0(mite) 0x%08llx mapped to %p\n",
+		  (unsigned long long)mite->mite_phys_addr,
+		  mite->mite_io_addr);
+
+
+	/* The PCI BAR1 is the DAQ */
+	addr = pci_resource_start(mite->pcidev, 1);
+	length = pci_resource_len(mite->pcidev, 1);
+	mite->daq_phys_addr = addr;
+	mite->daq_io_addr = ioremap(mite->daq_phys_addr, length);
+	if (!mite->daq_io_addr) {
+		__a4l_err("failed to remap daq io memory address\n");
+		pci_release_regions(mite->pcidev);
+		return -ENOMEM;
+	}
+
+	__a4l_dbg(1, drv_dbg, "bar1(daq) 0x%08llx mapped to %p\n",
+		  (unsigned long long)mite->daq_phys_addr,
+		  mite->daq_io_addr);
+
+	if (use_iodwbsr_1) {
+		__a4l_dbg(1, drv_dbg, "using I/O Window Base Size register 1\n");
+		writel(0, mite->mite_io_addr + MITE_IODWBSR);
+		writel(mite->daq_phys_addr | WENAB |
+		       MITE_IODWBSR_1_WSIZE_bits(length),
+		       mite->mite_io_addr + MITE_IODWBSR_1);
+		writel(0, mite->mite_io_addr + MITE_IODWCR_1);
+	} else {
+		writel(mite->daq_phys_addr | WENAB,
+		       mite->mite_io_addr + MITE_IODWBSR);
+	}
+
+	/* Make sure dma bursts work.  I got this from running a bus analyzer
+	   on a pxi-6281 and a pxi-6713.  6713 powered up with register value
+	   of 0x61f and bursts worked.  6281 powered up with register value of
+	   0x1f and bursts didn't work.  The NI windows driver reads the register,
+	   then does a bitwise-or of 0x600 with it and writes it back.
+	*/
+	unknown_dma_burst_bits =
+		readl(mite->mite_io_addr + MITE_UNKNOWN_DMA_BURST_REG);
+	unknown_dma_burst_bits |= UNKNOWN_DMA_BURST_ENABLE_BITS;
+	writel(unknown_dma_burst_bits,
+	       mite->mite_io_addr + MITE_UNKNOWN_DMA_BURST_REG);
+
+	csigr_bits = readl(mite->mite_io_addr + MITE_CSIGR);
+	mite->num_channels = mite_csigr_dmac(csigr_bits);
+	if (mite->num_channels > MAX_MITE_DMA_CHANNELS) {
+		__a4l_err("MITE: bug? chip claims to have %i dma channels. "
+			  "Setting to %i.\n",
+			  mite->num_channels, MAX_MITE_DMA_CHANNELS);
+		mite->num_channels = MAX_MITE_DMA_CHANNELS;
+	}
+
+	__a4l_dbg(1, drv_dbg, " version = %i, type = %i, mite mode = %i, "
+		  "interface mode = %i\n",
+		  mite_csigr_version(csigr_bits),
+		  mite_csigr_type(csigr_bits),
+		  mite_csigr_mmode(csigr_bits),
+		  mite_csigr_imode(csigr_bits));
+	__a4l_dbg(1, drv_dbg, " num channels = %i, write post fifo depth = %i, "
+		  "wins = %i, iowins = %i\n",
+		  mite_csigr_dmac(csigr_bits),
+		  mite_csigr_wpdep(csigr_bits),
+		  mite_csigr_wins(csigr_bits),
+		  mite_csigr_iowins(csigr_bits));
+
+	for (i = 0; i < mite->num_channels; i++) {
+		/* Registers the channel as a free one */
+		mite->channel_allocated[i] = 0;
+		/* Reset the channel */
+		writel(CHOR_DMARESET, mite->mite_io_addr + MITE_CHOR(i));
+		/* Disable interrupts */
+		writel(CHCR_CLR_DMA_IE | CHCR_CLR_LINKP_IE | CHCR_CLR_SAR_IE |
+		       CHCR_CLR_DONE_IE | CHCR_CLR_MRDY_IE | CHCR_CLR_DRDY_IE |
+		       CHCR_CLR_LC_IE | CHCR_CLR_CONT_RB_IE,
+		       mite->mite_io_addr + MITE_CHCR(i));
+
+		__a4l_dbg(1, drv_dbg, "channel[%d] initialized\n", i);
+	}
+
+	mite->used = 1;
+
+	return 0;
+}
+
+void a4l_mite_unsetup(struct mite_struct *mite)
+{
+	if (!mite)
+		return;
+
+	if (mite->mite_io_addr) {
+		iounmap(mite->mite_io_addr);
+		mite->mite_io_addr = NULL;
+	}
+
+	if (mite->daq_io_addr) {
+		iounmap(mite->daq_io_addr);
+		mite->daq_io_addr = NULL;
+	}
+
+	if(mite->used)
+		pci_release_regions( mite->pcidev );
+
+	mite->used = 0;
+}
+
+void a4l_mite_list_devices(void)
+{
+	struct list_head *this;
+
+	printk("Analogy: MITE: Available NI device IDs:");
+	list_for_each(this, &mite_devices) {
+		struct mite_struct *mite =
+			list_entry(this, struct mite_struct, list);
+
+		printk(" 0x%04x", mite_device_id(mite));
+		if(mite->used)
+			printk("(used)");
+	}
+
+	printk("\n");
+}
+
+
+
+struct mite_struct * a4l_mite_find_device(int bus, 
+					  int slot, unsigned short device_id)
+{
+	struct list_head *this;
+
+	list_for_each(this, &mite_devices) {
+		struct mite_struct *mite =
+			list_entry(this, struct mite_struct, list);
+
+		if(mite->pcidev->device != device_id)
+			continue;
+
+		if((bus <= 0 && slot <= 0) ||
+		   (bus == mite->pcidev->bus->number &&
+		    slot == PCI_SLOT(mite->pcidev->devfn)))
+			return mite;
+	}
+
+	return NULL;
+}
+EXPORT_SYMBOL_GPL(a4l_mite_find_device);
+
+struct mite_channel *
+a4l_mite_request_channel_in_range(struct mite_struct *mite,
+				  struct mite_dma_descriptor_ring *ring,
+				  unsigned min_channel, unsigned max_channel)
+{
+	int i;
+	unsigned long flags;
+	struct mite_channel *channel = NULL;
+
+	__a4l_dbg(1, drv_dbg, " min_channel = %u, max_channel = %u\n",
+		  min_channel, max_channel);
+
+	/* spin lock so a4l_mite_release_channel can be called safely
+	   from interrupts */
+	rtdm_lock_get_irqsave(&mite->lock, flags);
+	for (i = min_channel; i <= max_channel; ++i) {
+
+		__a4l_dbg(1, drv_dbg, " channel[%d] allocated = %d\n",
+			  i, mite->channel_allocated[i]);
+
+		if (mite->channel_allocated[i] == 0) {
+			mite->channel_allocated[i] = 1;
+			channel = &mite->channels[i];
+			channel->ring = ring;
+			break;
+		}
+	}
+	rtdm_lock_put_irqrestore(&mite->lock, flags);
+	return channel;
+}
+
+void a4l_mite_release_channel(struct mite_channel *mite_chan)
+{
+	struct mite_struct *mite = mite_chan->mite;
+	unsigned long flags;
+
+	/* Spin lock to prevent races with mite_request_channel */
+	rtdm_lock_get_irqsave(&mite->lock, flags);
+	if (mite->channel_allocated[mite_chan->channel]) {
+		/* disable all channel's interrupts */
+		writel(CHCR_CLR_DMA_IE | CHCR_CLR_LINKP_IE |
+		       CHCR_CLR_SAR_IE | CHCR_CLR_DONE_IE |
+		       CHCR_CLR_MRDY_IE | CHCR_CLR_DRDY_IE |
+		       CHCR_CLR_LC_IE | CHCR_CLR_CONT_RB_IE,
+		       mite->mite_io_addr + MITE_CHCR(mite_chan->channel));
+		a4l_mite_dma_disarm(mite_chan);
+		mite_dma_reset(mite_chan);
+		mite->channel_allocated[mite_chan->channel] = 0;
+		mite_chan->ring = NULL;
+		mmiowb();
+	}
+	rtdm_lock_put_irqrestore(&mite->lock, flags);
+}
+
+void a4l_mite_dma_arm(struct mite_channel *mite_chan)
+{
+	struct mite_struct *mite = mite_chan->mite;
+	int chor;
+	unsigned long flags;
+
+	MDPRINTK("a4l_mite_dma_arm ch%i\n", mite_chan->channel);
+	/* Memory barrier is intended to ensure any twiddling with the buffer
+	   is done before writing to the mite to arm the dma transfer */
+	smp_mb();
+	/* arm */
+	chor = CHOR_START;
+	rtdm_lock_get_irqsave(&mite->lock, flags);
+	mite_chan->done = 0;
+	writel(chor, mite->mite_io_addr + MITE_CHOR(mite_chan->channel));
+	mmiowb();
+	rtdm_lock_put_irqrestore(&mite->lock, flags);
+}
+
+void a4l_mite_dma_disarm(struct mite_channel *mite_chan)
+{
+	struct mite_struct *mite = mite_chan->mite;
+	unsigned chor;
+
+	/* disarm */
+	chor = CHOR_ABORT;
+	writel(chor, mite->mite_io_addr + MITE_CHOR(mite_chan->channel));
+}
+
+int a4l_mite_buf_change(struct mite_dma_descriptor_ring *ring, struct a4l_subdevice *subd)
+{
+	struct a4l_buffer *buf = subd->buf;
+	unsigned int n_links;
+	int i;
+
+	if (ring->descriptors) {
+		pci_free_consistent(ring->pcidev,
+				    ring->n_links * sizeof(struct mite_dma_descriptor),
+				    ring->descriptors, ring->descriptors_dma_addr);
+	}
+	ring->descriptors = NULL;
+	ring->descriptors_dma_addr = 0;
+	ring->n_links = 0;
+
+	if (buf->size == 0) {
+		return 0;
+	}
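+	/* One DMA link descriptor is needed per page of the buffer */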
+	n_links = buf->size >> PAGE_SHIFT;
+
+	MDPRINTK("ring->pcidev=%p, n_links=0x%04x\n", ring->pcidev, n_links);
+
+	ring->descriptors =
+		pci_alloc_consistent(ring->pcidev,
+				     n_links * sizeof(struct mite_dma_descriptor),
+				     &ring->descriptors_dma_addr);
+	if (!ring->descriptors) {
+		printk("MITE: ring buffer allocation failed\n");
+		return -ENOMEM;
+	}
+	ring->n_links = n_links;
+
+	for (i = 0; i < n_links; i++) {
+		ring->descriptors[i].count = cpu_to_le32(PAGE_SIZE);
+		ring->descriptors[i].addr = cpu_to_le32(buf->pg_list[i]);
+		ring->descriptors[i].next =
+			cpu_to_le32(ring->descriptors_dma_addr +
+				    (i + 1) * sizeof(struct mite_dma_descriptor));
+	}
+
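+	/* Close the ring: the last descriptor links back to the first one */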
+	ring->descriptors[n_links - 1].next =
+		cpu_to_le32(ring->descriptors_dma_addr);
+
+	/* Barrier is meant to ensure that all the writes to the dma descriptors
+	   have completed before the dma controller is commanded to read them */
+	smp_wmb();
+
+	return 0;
+}
+
+void a4l_mite_prep_dma(struct mite_channel *mite_chan,
+		   unsigned int num_device_bits, unsigned int num_memory_bits)
+{
+	unsigned int chor, chcr, mcr, dcr, lkcr;
+	struct mite_struct *mite = mite_chan->mite;
+
+	MDPRINTK("a4l_mite_prep_dma ch%i\n", mite_chan->channel);
+
+	/* reset DMA and FIFO */
+	chor = CHOR_DMARESET | CHOR_FRESET;
+	writel(chor, mite->mite_io_addr + MITE_CHOR(mite_chan->channel));
+
+	/* short link chaining mode */
+	chcr = CHCR_SET_DMA_IE | CHCR_LINKSHORT | CHCR_SET_DONE_IE |
+		CHCR_BURSTEN;
+	/*
+	 * Link Complete Interrupt: interrupt every time a link
+	 * in MITE_RING is completed. This can generate a lot of
+	 * extra interrupts, but right now we update the values
+	 * of buf_int_ptr and buf_int_count at each interrupt.  A
+	 * better method is to poll the MITE before each user
+	 * "read()" to calculate the number of bytes available.
+	 */
+	chcr |= CHCR_SET_LC_IE;
+	if (num_memory_bits == 32 && num_device_bits == 16) {
+		/* Doing a combined 32 and 16 bit byteswap gets the 16
+		   bit samples into the fifo in the right order.
+		   Tested doing 32 bit memory to 16 bit device
+		   transfers to the analog out of a pxi-6281, which
+		   has mite version = 1, type = 4.  This also works
+		   for dma reads from the counters on e-series boards.
+		*/
+		chcr |= CHCR_BYTE_SWAP_DEVICE | CHCR_BYTE_SWAP_MEMORY;
+	}
+
+	if (mite_chan->dir == A4L_INPUT) {
+		chcr |= CHCR_DEV_TO_MEM;
+	}
+	writel(chcr, mite->mite_io_addr + MITE_CHCR(mite_chan->channel));
+
+	/* to/from memory */
+	mcr = CR_RL(64) | CR_ASEQUP;
+	switch (num_memory_bits) {
+	case 8:
+		mcr |= CR_PSIZE8;
+		break;
+	case 16:
+		mcr |= CR_PSIZE16;
+		break;
+	case 32:
+		mcr |= CR_PSIZE32;
+		break;
+	default:
+		__a4l_err("MITE: bug! "
+			  "invalid mem bit width for dma transfer\n");
+		break;
+	}
+	writel(mcr, mite->mite_io_addr + MITE_MCR(mite_chan->channel));
+
+	/* from/to device */
+	dcr = CR_RL(64) | CR_ASEQUP;
+	dcr |= CR_PORTIO | CR_AMDEVICE | CR_REQSDRQ(mite_chan->channel);
+	switch (num_device_bits) {
+	case 8:
+		dcr |= CR_PSIZE8;
+		break;
+	case 16:
+		dcr |= CR_PSIZE16;
+		break;
+	case 32:
+		dcr |= CR_PSIZE32;
+		break;
+	default:
+		__a4l_err("MITE: bug! "
+			  "invalid dev bit width for dma transfer\n");
+		break;
+	}
+	writel(dcr, mite->mite_io_addr + MITE_DCR(mite_chan->channel));
+
+	/* reset the DAR */
+	writel(0, mite->mite_io_addr + MITE_DAR(mite_chan->channel));
+
+	/* the link is 32bits */
+	lkcr = CR_RL(64) | CR_ASEQUP | CR_PSIZE32;
+	writel(lkcr, mite->mite_io_addr + MITE_LKCR(mite_chan->channel));
+
+	/* starting address for link chaining */
+	writel(mite_chan->ring->descriptors_dma_addr,
+	       mite->mite_io_addr + MITE_LKAR(mite_chan->channel));
+
+	MDPRINTK("exit a4l_mite_prep_dma\n");
+}
+
+u32 mite_device_bytes_transferred(struct mite_channel *mite_chan)
+{
+	struct mite_struct *mite = mite_chan->mite;
+	return readl(mite->mite_io_addr + MITE_DAR(mite_chan->channel));
+}
+
+u32 a4l_mite_bytes_in_transit(struct mite_channel * mite_chan)
+{
+	struct mite_struct *mite = mite_chan->mite;
+	return readl(mite->mite_io_addr +
+		     MITE_FCR(mite_chan->channel)) & 0x000000FF;
+}
+
+/* Returns lower bound for number of bytes transferred from device to memory */
+u32 a4l_mite_bytes_written_to_memory_lb(struct mite_channel * mite_chan)
+{
+	u32 device_byte_count;
+
+	device_byte_count = mite_device_bytes_transferred(mite_chan);
+	return device_byte_count - a4l_mite_bytes_in_transit(mite_chan);
+}
+
+/* Returns upper bound for number of bytes transferred from device to memory */
+u32 a4l_mite_bytes_written_to_memory_ub(struct mite_channel * mite_chan)
+{
+	u32 in_transit_count;
+
+	in_transit_count = a4l_mite_bytes_in_transit(mite_chan);
+	return mite_device_bytes_transferred(mite_chan) - in_transit_count;
+}
+
+/* Returns lower bound for number of bytes read from memory for transfer to device */
+u32 a4l_mite_bytes_read_from_memory_lb(struct mite_channel * mite_chan)
+{
+	u32 device_byte_count;
+
+	device_byte_count = mite_device_bytes_transferred(mite_chan);
+	return device_byte_count + a4l_mite_bytes_in_transit(mite_chan);
+}
+
+/* Returns upper bound for number of bytes read from memory for transfer to device */
+u32 a4l_mite_bytes_read_from_memory_ub(struct mite_channel * mite_chan)
+{
+	u32 in_transit_count;
+
+	in_transit_count = a4l_mite_bytes_in_transit(mite_chan);
+	return mite_device_bytes_transferred(mite_chan) + in_transit_count;
+}
+
+int a4l_mite_sync_input_dma(struct mite_channel *mite_chan, struct a4l_subdevice *subd)
+{
+	unsigned int nbytes_lb, nbytes_ub;
+
+	nbytes_lb = a4l_mite_bytes_written_to_memory_lb(mite_chan);
+	nbytes_ub = a4l_mite_bytes_written_to_memory_ub(mite_chan);
+
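+	/* Reserve room for the worst case (upper bound), but only commit
+	   the bytes which are guaranteed to have reached memory (lower bound) */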
+	if(a4l_buf_prepare_absput(subd, nbytes_ub) != 0) {
+		__a4l_err("MITE: DMA overwrite of free area\n");
+		return -EPIPE;
+	}
+
+	return a4l_buf_commit_absput(subd, nbytes_lb);
+}
+
+int a4l_mite_sync_output_dma(struct mite_channel *mite_chan, struct a4l_subdevice *subd)
+{
+	struct a4l_buffer *buf = subd->buf;
+	unsigned int nbytes_ub, nbytes_lb;
+	int err;
+
+	nbytes_lb = a4l_mite_bytes_read_from_memory_lb(mite_chan);
+	nbytes_ub = a4l_mite_bytes_read_from_memory_ub(mite_chan);
+
+	err = a4l_buf_prepare_absget(subd, nbytes_ub);
+	if(err < 0) {
+		__a4l_info("MITE: DMA underrun\n");
+		return -EPIPE;
+	}
+
+	err = a4l_buf_commit_absget(subd, nbytes_lb);
+
+	/* If the MITE has already transferred more than required, we
+	   can disable it */
+	if (test_bit(A4L_BUF_EOA_NR, &buf->flags))
+		writel(CHOR_STOP,
+		       mite_chan->mite->mite_io_addr +
+		       MITE_CHOR(mite_chan->channel));
+
+	return err;
+}
+
+u32 a4l_mite_get_status(struct mite_channel *mite_chan)
+{
+	struct mite_struct *mite = mite_chan->mite;
+	u32 status;
+	unsigned long flags;
+
+	rtdm_lock_get_irqsave(&mite->lock, flags);
+	status = readl(mite->mite_io_addr + MITE_CHSR(mite_chan->channel));
+	if (status & CHSR_DONE) {
+		mite_chan->done = 1;
+		writel(CHOR_CLRDONE,
+		       mite->mite_io_addr + MITE_CHOR(mite_chan->channel));
+	}
+	mmiowb();
+	rtdm_lock_put_irqrestore(&mite->lock, flags);
+	return status;
+}
+
+int a4l_mite_done(struct mite_channel *mite_chan)
+{
+	struct mite_struct *mite = mite_chan->mite;
+	unsigned long flags;
+	int done;
+
+	a4l_mite_get_status(mite_chan);
+	rtdm_lock_get_irqsave(&mite->lock, flags);
+	done = mite_chan->done;
+	rtdm_lock_put_irqrestore(&mite->lock, flags);
+	return done;
+}
+
+#ifdef CONFIG_DEBUG_MITE
+
+static void a4l_mite_decode(const char *const bit_str[], unsigned int bits);
+
+/* names of bits in mite registers */
+
+static const char *const mite_CHOR_strings[] = {
+	"start", "cont", "stop", "abort",
+	"freset", "clrlc", "clrrb", "clrdone",
+	"clr_lpause", "set_lpause", "clr_send_tc",
+	"set_send_tc", "12", "13", "14",
+	"15", "16", "17", "18",
+	"19", "20", "21", "22",
+	"23", "24", "25", "26",
+	"27", "28", "29", "30",
+	"dmareset",
+};
+
+static const char *const mite_CHCR_strings[] = {
+	"continue", "ringbuff", "2", "3",
+	"4", "5", "6", "7",
+	"8", "9", "10", "11",
+	"12", "13", "bursten", "fifodis",
+	"clr_cont_rb_ie", "set_cont_rb_ie", "clr_lc_ie", "set_lc_ie",
+	"clr_drdy_ie", "set_drdy_ie", "clr_mrdy_ie", "set_mrdy_ie",
+	"clr_done_ie", "set_done_ie", "clr_sar_ie", "set_sar_ie",
+	"clr_linkp_ie", "set_linkp_ie", "clr_dma_ie", "set_dma_ie",
+};
+
+static const char *const mite_MCR_strings[] = {
+	"amdevice", "1", "2", "3",
+	"4", "5", "portio", "portvxi",
+	"psizebyte", "psizehalf (byte & half = word)", "aseqxp1", "11",
+	"12", "13", "blocken", "berhand",
+	"reqsintlim/reqs0", "reqs1", "reqs2", "rd32",
+	"rd512", "rl1", "rl2", "rl8",
+	"24", "25", "26", "27",
+	"28", "29", "30", "stopen",
+};
+
+static const char *const mite_DCR_strings[] = {
+	"amdevice", "1", "2", "3",
+	"4", "5", "portio", "portvxi",
+	"psizebyte", "psizehalf (byte & half = word)", "aseqxp1", "aseqxp2",
+	"aseqxp8", "13", "blocken", "berhand",
+	"reqsintlim", "reqs1", "reqs2", "rd32",
+	"rd512", "rl1", "rl2", "rl8",
+	"23", "24", "25", "27",
+	"28", "wsdevc", "wsdevs", "rwdevpack",
+};
+
+static const char *const mite_LKCR_strings[] = {
+	"amdevice", "1", "2", "3",
+	"4", "5", "portio", "portvxi",
+	"psizebyte", "psizehalf (byte & half = word)", "asequp", "aseqdown",
+	"12", "13", "14", "berhand",
+	"16", "17", "18", "rd32",
+	"rd512", "rl1", "rl2", "rl8",
+	"24", "25", "26", "27",
+	"28", "29", "30", "chngend",
+};
+
+static const char *const mite_CHSR_strings[] = {
+	"d.err0", "d.err1", "m.err0", "m.err1",
+	"l.err0", "l.err1", "drq0", "drq1",
+	"end", "xferr", "operr0", "operr1",
+	"stops", "habort", "sabort", "error",
+	"16", "conts_rb", "18", "linkc",
+	"20", "drdy", "22", "mrdy",
+	"24", "done", "26", "sars",
+	"28", "lpauses", "30", "int",
+};
+
+void a4l_mite_dump_regs(struct mite_channel *mite_chan)
+{
+	unsigned long mite_io_addr =
+		(unsigned long)mite_chan->mite->mite_io_addr;
+	unsigned long addr = 0;
+	unsigned long temp = 0;
+
+	printk("a4l_mite_dump_regs ch%i\n", mite_chan->channel);
+	printk("mite address is  =0x%08lx\n", mite_io_addr);
+
+	addr = mite_io_addr + MITE_CHOR(mite_chan->channel);
+	printk("mite status[CHOR]at 0x%08lx =0x%08lx\n", addr, temp =
+	       readl((void *)addr));
+	a4l_mite_decode(mite_CHOR_strings, temp);
+	addr = mite_io_addr + MITE_CHCR(mite_chan->channel);
+	printk("mite status[CHCR]at 0x%08lx =0x%08lx\n", addr, temp =
+	       readl((void *)addr));
+	a4l_mite_decode(mite_CHCR_strings, temp);
+	addr = mite_io_addr + MITE_TCR(mite_chan->channel);
+	printk("mite status[TCR] at 0x%08lx =0x%08x\n", addr,
+	       readl((void *)addr));
+	addr = mite_io_addr + MITE_MCR(mite_chan->channel);
+	printk("mite status[MCR] at 0x%08lx =0x%08lx\n", addr, temp =
+	       readl((void *)addr));
+	a4l_mite_decode(mite_MCR_strings, temp);
+
+	addr = mite_io_addr + MITE_MAR(mite_chan->channel);
+	printk("mite status[MAR] at 0x%08lx =0x%08x\n", addr,
+	       readl((void *)addr));
+	addr = mite_io_addr + MITE_DCR(mite_chan->channel);
+	printk("mite status[DCR] at 0x%08lx =0x%08lx\n", addr, temp =
+	       readl((void *)addr));
+	a4l_mite_decode(mite_DCR_strings, temp);
+	addr = mite_io_addr + MITE_DAR(mite_chan->channel);
+	printk("mite status[DAR] at 0x%08lx =0x%08x\n", addr,
+	       readl((void *)addr));
+	addr = mite_io_addr + MITE_LKCR(mite_chan->channel);
+	printk("mite status[LKCR]at 0x%08lx =0x%08lx\n", addr, temp =
+	       readl((void *)addr));
+	a4l_mite_decode(mite_LKCR_strings, temp);
+	addr = mite_io_addr + MITE_LKAR(mite_chan->channel);
+	printk("mite status[LKAR]at 0x%08lx =0x%08x\n", addr,
+	       readl((void *)addr));
+
+	addr = mite_io_addr + MITE_CHSR(mite_chan->channel);
+	printk("mite status[CHSR]at 0x%08lx =0x%08lx\n", addr, temp =
+	       readl((void *)addr));
+	a4l_mite_decode(mite_CHSR_strings, temp);
+	addr = mite_io_addr + MITE_FCR(mite_chan->channel);
+	printk("mite status[FCR] at 0x%08lx =0x%08x\n\n", addr,
+	       readl((void *)addr));
+}
+
+
+static void a4l_mite_decode(const char *const bit_str[], unsigned int bits)
+{
+	int i;
+
+	for (i = 31; i >= 0; i--) {
+		if (bits & (1 << i)) {
+			printk(" %s", bit_str[i]);
+		}
+	}
+	printk("\n");
+}
+
+#endif /* CONFIG_DEBUG_MITE */
+
+
+static int __init mite_init(void)
+{
+	int err;
+
+	/* Register the mite's PCI driver */
+	err = pci_register_driver(&mite_driver);
+
+	if(err == 0)
+		a4l_mite_list_devices();
+
+	return err;
+}
+
+static void __exit mite_cleanup(void)
+{
+
+	/* Unregister the PCI structure driver */
+	pci_unregister_driver(&mite_driver);
+
+	/* Just paranoia... */
+	while (!list_empty(&mite_devices)) {
+		struct list_head *this = mite_devices.next;
+		struct mite_struct *mite =
+			list_entry(this, struct mite_struct, list);
+
+		list_del(this);
+		kfree(mite);
+	}
+}
+
+MODULE_LICENSE("GPL");
+module_init(mite_init);
+module_exit(mite_cleanup);
+
+EXPORT_SYMBOL_GPL(a4l_mite_dma_arm);
+EXPORT_SYMBOL_GPL(a4l_mite_dma_disarm);
+EXPORT_SYMBOL_GPL(a4l_mite_sync_input_dma);
+EXPORT_SYMBOL_GPL(a4l_mite_sync_output_dma);
+EXPORT_SYMBOL_GPL(a4l_mite_setup);
+EXPORT_SYMBOL_GPL(a4l_mite_unsetup);
+EXPORT_SYMBOL_GPL(a4l_mite_list_devices);
+EXPORT_SYMBOL_GPL(a4l_mite_request_channel_in_range);
+EXPORT_SYMBOL_GPL(a4l_mite_release_channel);
+EXPORT_SYMBOL_GPL(a4l_mite_prep_dma);
+EXPORT_SYMBOL_GPL(a4l_mite_buf_change);
+EXPORT_SYMBOL_GPL(a4l_mite_bytes_written_to_memory_lb);
+EXPORT_SYMBOL_GPL(a4l_mite_bytes_written_to_memory_ub);
+EXPORT_SYMBOL_GPL(a4l_mite_bytes_read_from_memory_lb);
+EXPORT_SYMBOL_GPL(a4l_mite_bytes_read_from_memory_ub);
+EXPORT_SYMBOL_GPL(a4l_mite_bytes_in_transit);
+EXPORT_SYMBOL_GPL(a4l_mite_get_status);
+EXPORT_SYMBOL_GPL(a4l_mite_done);
+#ifdef CONFIG_DEBUG_MITE
+EXPORT_SYMBOL_GPL(a4l_mite_decode);
+EXPORT_SYMBOL_GPL(a4l_mite_dump_regs);
+#endif /* CONFIG_DEBUG_MITE */
+++ linux-patched/drivers/xenomai/analogy/national_instruments/mite.h	2022-03-21 12:58:31.115872130 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/drivers/xenomai/analogy/national_instruments/ni_660x.c	1970-01-01 01:00:00.000000000 +0100
+/*
+ * Hardware driver for NI Mite PCI interface chip
+ * @note Copyright (C) 1999 David A. Schleef <ds@schleef.org>
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA.
+ */
+#ifndef __ANALOGY_NI_MITE_H__
+#define __ANALOGY_NI_MITE_H__
+
+#include <linux/pci.h>
+#include <linux/slab.h>
+#include <rtdm/analogy/device.h>
+
+#define PCI_VENDOR_ID_NATINST 0x1093
+#define PCI_MITE_SIZE 4096
+#define PCI_DAQ_SIZE 4096
+#define PCI_DAQ_SIZE_660X 8192
+#define PCIMIO_COMPAT
+#define MAX_MITE_DMA_CHANNELS 8
+
+#define TOP_OF_PAGE(x) ((x)|(~(PAGE_MASK)))
+
+struct mite_dma_descriptor {
+	u32 count;
+	u32 addr;
+	u32 next;
+	u32 dar;
+};
+
+struct mite_dma_descriptor_ring {
+	struct pci_dev *pcidev;
+	u32 n_links;
+	struct mite_dma_descriptor *descriptors;
+	dma_addr_t descriptors_dma_addr;
+};
+
+struct mite_channel {
+	struct mite_struct *mite;
+	u32 channel;
+	u32 dir;
+	u32 done;
+	struct mite_dma_descriptor_ring *ring;
+};
+
+struct mite_struct {
+	struct list_head list;
+	rtdm_lock_t lock;
+	u32 used;
+	u32 num_channels;
+
+	struct mite_channel channels[MAX_MITE_DMA_CHANNELS];
+	u32 channel_allocated[MAX_MITE_DMA_CHANNELS];
+
+	struct pci_dev *pcidev;
+	resource_size_t mite_phys_addr;
+	void *mite_io_addr;
+	resource_size_t daq_phys_addr;
+	void *daq_io_addr;
+};
+
+static inline
+struct mite_dma_descriptor_ring *mite_alloc_ring(struct	mite_struct *mite)
+{
+	struct mite_dma_descriptor_ring *ring =
+		kmalloc(sizeof(struct mite_dma_descriptor_ring), GFP_DMA);
+
+	if (ring == NULL)
+		return ring;
+
+	memset(ring, 0, sizeof(struct mite_dma_descriptor_ring));
+
+	ring->pcidev = mite->pcidev;
+	if (ring->pcidev == NULL) {
+		kfree(ring);
+		return NULL;
+	}
+
+	return ring;
+};
+
+static inline void mite_free_ring(struct mite_dma_descriptor_ring *ring)
+{
+	if (ring) {
+		if (ring->descriptors) {
+			pci_free_consistent(
+				ring->pcidev,
+				ring->n_links *
+				sizeof(struct mite_dma_descriptor),
+				ring->descriptors, ring->descriptors_dma_addr);
+		}
+		kfree(ring);
+	}
+};
+
+static inline unsigned int mite_irq(struct mite_struct *mite)
+{
+	return mite->pcidev->irq;
+};
+static inline unsigned int mite_device_id(struct mite_struct *mite)
+{
+	return mite->pcidev->device;
+};
+
+int a4l_mite_setup(struct mite_struct *mite, int use_iodwbsr_1);
+void a4l_mite_unsetup(struct mite_struct *mite);
+void a4l_mite_list_devices(void);
+struct mite_struct * a4l_mite_find_device(int bus,
+					  int slot, unsigned short device_id);
+struct mite_channel *
+a4l_mite_request_channel_in_range(struct mite_struct *mite,
+				  struct mite_dma_descriptor_ring *ring,
+				  unsigned min_channel, unsigned max_channel);
+static inline struct mite_channel *mite_request_channel(struct mite_struct
+	*mite, struct mite_dma_descriptor_ring *ring)
+{
+	return a4l_mite_request_channel_in_range(mite, ring, 0,
+		mite->num_channels - 1);
+}
+void a4l_mite_release_channel(struct mite_channel *mite_chan);
+
+void a4l_mite_dma_arm(struct mite_channel *mite_chan);
+void a4l_mite_dma_disarm(struct mite_channel *mite_chan);
+int a4l_mite_sync_input_dma(struct mite_channel *mite_chan, struct a4l_subdevice *subd);
+int a4l_mite_sync_output_dma(struct mite_channel *mite_chan, struct a4l_subdevice *subd);
+u32 a4l_mite_bytes_written_to_memory_lb(struct mite_channel *mite_chan);
+u32 a4l_mite_bytes_written_to_memory_ub(struct mite_channel *mite_chan);
+u32 a4l_mite_bytes_read_from_memory_lb(struct mite_channel *mite_chan);
+u32 a4l_mite_bytes_read_from_memory_ub(struct mite_channel *mite_chan);
+u32 a4l_mite_bytes_in_transit(struct mite_channel *mite_chan);
+u32 a4l_mite_get_status(struct mite_channel *mite_chan);
+int a4l_mite_done(struct mite_channel *mite_chan);
+void a4l_mite_prep_dma(struct mite_channel *mite_chan,
+		   unsigned int num_device_bits, unsigned int num_memory_bits);
+int a4l_mite_buf_change(struct mite_dma_descriptor_ring *ring, struct a4l_subdevice *subd);
+
+#ifdef CONFIG_DEBUG_MITE
+void mite_print_chsr(unsigned int chsr);
+void a4l_mite_dump_regs(struct mite_channel *mite_chan);
+#endif
+
+static inline int CHAN_OFFSET(int channel)
+{
+	return 0x500 + 0x100 * channel;
+};
+
+enum mite_registers {
+	/* The bits 0x90180700 in MITE_UNKNOWN_DMA_BURST_REG can be
+	   written and read back.  The bits 0x1f always read as 1.
+	   The rest always read as zero. */
+	MITE_UNKNOWN_DMA_BURST_REG = 0x28,
+	MITE_IODWBSR = 0xc0,	//IO Device Window Base Size Register
+	MITE_IODWBSR_1 = 0xc4,	// IO Device Window Base Size Register 1
+	MITE_IODWCR_1 = 0xf4,
+	MITE_PCI_CONFIG_OFFSET = 0x300,
+	MITE_CSIGR = 0x460	//chip signature
+};
+static inline int MITE_CHOR(int channel)	// channel operation
+{
+	return CHAN_OFFSET(channel) + 0x0;
+};
+static inline int MITE_CHCR(int channel)	// channel control
+{
+	return CHAN_OFFSET(channel) + 0x4;
+};
+static inline int MITE_TCR(int channel)	// transfer count
+{
+	return CHAN_OFFSET(channel) + 0x8;
+};
+static inline int MITE_MCR(int channel)	// memory configuration
+{
+	return CHAN_OFFSET(channel) + 0xc;
+};
+static inline int MITE_MAR(int channel)	// memory address
+{
+	return CHAN_OFFSET(channel) + 0x10;
+};
+static inline int MITE_DCR(int channel)	// device configuration
+{
+	return CHAN_OFFSET(channel) + 0x14;
+};
+static inline int MITE_DAR(int channel)	// device address
+{
+	return CHAN_OFFSET(channel) + 0x18;
+};
+static inline int MITE_LKCR(int channel)	// link configuration
+{
+	return CHAN_OFFSET(channel) + 0x1c;
+};
+static inline int MITE_LKAR(int channel)	// link address
+{
+	return CHAN_OFFSET(channel) + 0x20;
+};
+static inline int MITE_LLKAR(int channel)	// see mite section of tnt5002 manual
+{
+	return CHAN_OFFSET(channel) + 0x24;
+};
+static inline int MITE_BAR(int channel)	// base address
+{
+	return CHAN_OFFSET(channel) + 0x28;
+};
+static inline int MITE_BCR(int channel)	// base count
+{
+	return CHAN_OFFSET(channel) + 0x2c;
+};
+static inline int MITE_SAR(int channel)	// ? address
+{
+	return CHAN_OFFSET(channel) + 0x30;
+};
+static inline int MITE_WSCR(int channel)	// ?
+{
+	return CHAN_OFFSET(channel) + 0x34;
+};
+static inline int MITE_WSER(int channel)	// ?
+{
+	return CHAN_OFFSET(channel) + 0x38;
+};
+static inline int MITE_CHSR(int channel)	// channel status
+{
+	return CHAN_OFFSET(channel) + 0x3c;
+};
+static inline int MITE_FCR(int channel)	// fifo count
+{
+	return CHAN_OFFSET(channel) + 0x40;
+};
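+
+/*
+ * Worked example of the per-channel register map above: for DMA
+ * channel 1, CHAN_OFFSET(1) = 0x500 + 0x100 = 0x600, so MITE_CHOR(1)
+ * is 0x600, MITE_CHSR(1) is 0x63c and MITE_FCR(1) is 0x640, all
+ * relative to mite_io_addr.
+ */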
+
+enum MITE_IODWBSR_bits {
+	WENAB = 0x80,		// window enable
+};
+
+static inline unsigned MITE_IODWBSR_1_WSIZE_bits(unsigned size)
+{
+	unsigned order = 0;
+	while (size >>= 1)
+		++order;
+	BUG_ON(order < 1);
+	return (order - 1) & 0x1f;
+}
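+
+/*
+ * Example: for a PCI_DAQ_SIZE window of 4096 bytes the loop above
+ * yields order = 12, so MITE_IODWBSR_1_WSIZE_bits(4096) returns 11
+ * (0x0b); for PCI_DAQ_SIZE_660X (8192 bytes) it returns 12 (0x0c).
+ */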
+
+enum MITE_UNKNOWN_DMA_BURST_bits {
+	UNKNOWN_DMA_BURST_ENABLE_BITS = 0x600
+};
+
+static inline int mite_csigr_version(u32 csigr_bits)
+{
+	return csigr_bits & 0xf;
+};
+static inline int mite_csigr_type(u32 csigr_bits)
+{				// original mite = 0, minimite = 1
+	return (csigr_bits >> 4) & 0xf;
+};
+static inline int mite_csigr_mmode(u32 csigr_bits)
+{				// mite mode, minimite = 1
+	return (csigr_bits >> 8) & 0x3;
+};
+static inline int mite_csigr_imode(u32 csigr_bits)
+{				// cpu port interface mode, pci = 0x3
+	return (csigr_bits >> 12) & 0x3;
+};
+static inline int mite_csigr_dmac(u32 csigr_bits)
+{				// number of dma channels
+	return (csigr_bits >> 16) & 0xf;
+};
+static inline int mite_csigr_wpdep(u32 csigr_bits)
+{				// write post fifo depth
+	unsigned int wpdep_bits = (csigr_bits >> 20) & 0x7;
+	if (wpdep_bits == 0)
+		return 0;
+	else
+		return 1 << (wpdep_bits - 1);
+};
+static inline int mite_csigr_wins(u32 csigr_bits)
+{
+	return (csigr_bits >> 24) & 0x1f;
+};
+static inline int mite_csigr_iowins(u32 csigr_bits)
+{				// number of io windows
+	return (csigr_bits >> 29) & 0x7;
+};
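+
+/*
+ * Illustration of the CSIGR decoding above, using a made-up readback
+ * value: 0x00432704 would decode as version 4, type 0 (original mite),
+ * mmode 3, imode 2, dmac 3 (3 DMA channels), a write post fifo depth
+ * of 8 (wpdep bits = 4), wins 0 and iowins 0.
+ */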
+
+enum MITE_MCR_bits {
+	MCRPON = 0,
+};
+
+enum MITE_DCR_bits {
+	DCR_NORMAL = (1 << 29),
+	DCRPON = 0,
+};
+
+enum MITE_CHOR_bits {
+	CHOR_DMARESET = (1 << 31),
+	CHOR_SET_SEND_TC = (1 << 11),
+	CHOR_CLR_SEND_TC = (1 << 10),
+	CHOR_SET_LPAUSE = (1 << 9),
+	CHOR_CLR_LPAUSE = (1 << 8),
+	CHOR_CLRDONE = (1 << 7),
+	CHOR_CLRRB = (1 << 6),
+	CHOR_CLRLC = (1 << 5),
+	CHOR_FRESET = (1 << 4),
+	CHOR_ABORT = (1 << 3),	/* stop without emptying fifo */
+	CHOR_STOP = (1 << 2),	/* stop after emptying fifo */
+	CHOR_CONT = (1 << 1),
+	CHOR_START = (1 << 0),
+	CHOR_PON = (CHOR_CLR_SEND_TC | CHOR_CLR_LPAUSE),
+};
+
+enum MITE_CHCR_bits {
+	CHCR_SET_DMA_IE = (1 << 31),
+	CHCR_CLR_DMA_IE = (1 << 30),
+	CHCR_SET_LINKP_IE = (1 << 29),
+	CHCR_CLR_LINKP_IE = (1 << 28),
+	CHCR_SET_SAR_IE = (1 << 27),
+	CHCR_CLR_SAR_IE = (1 << 26),
+	CHCR_SET_DONE_IE = (1 << 25),
+	CHCR_CLR_DONE_IE = (1 << 24),
+	CHCR_SET_MRDY_IE = (1 << 23),
+	CHCR_CLR_MRDY_IE = (1 << 22),
+	CHCR_SET_DRDY_IE = (1 << 21),
+	CHCR_CLR_DRDY_IE = (1 << 20),
+	CHCR_SET_LC_IE = (1 << 19),
+	CHCR_CLR_LC_IE = (1 << 18),
+	CHCR_SET_CONT_RB_IE = (1 << 17),
+	CHCR_CLR_CONT_RB_IE = (1 << 16),
+	CHCR_FIFODIS = (1 << 15),
+	CHCR_FIFO_ON = 0,
+	CHCR_BURSTEN = (1 << 14),
+	CHCR_NO_BURSTEN = 0,
+	CHCR_BYTE_SWAP_DEVICE = (1 << 6),
+	CHCR_BYTE_SWAP_MEMORY = (1 << 4),
+	CHCR_DIR = (1 << 3),
+	CHCR_DEV_TO_MEM = CHCR_DIR,
+	CHCR_MEM_TO_DEV = 0,
+	CHCR_NORMAL = (0 << 0),
+	CHCR_CONTINUE = (1 << 0),
+	CHCR_RINGBUFF = (2 << 0),
+	CHCR_LINKSHORT = (4 << 0),
+	CHCR_LINKLONG = (5 << 0),
+	CHCRPON =
+		(CHCR_CLR_DMA_IE | CHCR_CLR_LINKP_IE | CHCR_CLR_SAR_IE |
+		CHCR_CLR_DONE_IE | CHCR_CLR_MRDY_IE | CHCR_CLR_DRDY_IE |
+		CHCR_CLR_LC_IE | CHCR_CLR_CONT_RB_IE),
+};
+
+enum ConfigRegister_bits {
+	CR_REQS_MASK = 0x7 << 16,
+	CR_ASEQDONT = 0x0 << 10,
+	CR_ASEQUP = 0x1 << 10,
+	CR_ASEQDOWN = 0x2 << 10,
+	CR_ASEQ_MASK = 0x3 << 10,
+	CR_PSIZE8 = (1 << 8),
+	CR_PSIZE16 = (2 << 8),
+	CR_PSIZE32 = (3 << 8),
+	CR_PORTCPU = (0 << 6),
+	CR_PORTIO = (1 << 6),
+	CR_PORTVXI = (2 << 6),
+	CR_PORTMXI = (3 << 6),
+	CR_AMDEVICE = (1 << 0),
+};
+static inline int CR_REQS(int source)
+{
+	return (source & 0x7) << 16;
+};
+static inline int CR_REQSDRQ(unsigned drq_line)
+{
+	/* This also works on m-series when
+	   using channels (drq_line) 4 or 5. */
+	return CR_REQS((drq_line & 0x3) | 0x4);
+}
+static inline int CR_RL(unsigned int retry_limit)
+{
+	int value = 0;
+
+	while (retry_limit) {
+		retry_limit >>= 1;
+		value++;
+	}
+	if (value > 0x7)
+		__a4l_err("bug! retry_limit too large\n");
+
+	return (value & 0x7) << 21;
+}
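+
+/*
+ * Example use of the two helpers above: CR_REQSDRQ(1) requests DRQ
+ * line 1, i.e. CR_REQS(0x5) = 0x50000, and CR_RL(64) encodes a retry
+ * limit of 64 as 7 << 21 = 0xe00000.
+ */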
+
+enum CHSR_bits {
+	CHSR_INT = (1 << 31),
+	CHSR_LPAUSES = (1 << 29),
+	CHSR_SARS = (1 << 27),
+	CHSR_DONE = (1 << 25),
+	CHSR_MRDY = (1 << 23),
+	CHSR_DRDY = (1 << 21),
+	CHSR_LINKC = (1 << 19),
+	CHSR_CONTS_RB = (1 << 17),
+	CHSR_ERROR = (1 << 15),
+	CHSR_SABORT = (1 << 14),
+	CHSR_HABORT = (1 << 13),
+	CHSR_STOPS = (1 << 12),
+	CHSR_OPERR_mask = (3 << 10),
+	CHSR_OPERR_NOERROR = (0 << 10),
+	CHSR_OPERR_FIFOERROR = (1 << 10),
+	CHSR_OPERR_LINKERROR = (1 << 10),	/* ??? */
+	CHSR_XFERR = (1 << 9),
+	CHSR_END = (1 << 8),
+	CHSR_DRQ1 = (1 << 7),
+	CHSR_DRQ0 = (1 << 6),
+	CHSR_LxERR_mask = (3 << 4),
+	CHSR_LBERR = (1 << 4),
+	CHSR_LRERR = (2 << 4),
+	CHSR_LOERR = (3 << 4),
+	CHSR_MxERR_mask = (3 << 2),
+	CHSR_MBERR = (1 << 2),
+	CHSR_MRERR = (2 << 2),
+	CHSR_MOERR = (3 << 2),
+	CHSR_DxERR_mask = (3 << 0),
+	CHSR_DBERR = (1 << 0),
+	CHSR_DRERR = (2 << 0),
+	CHSR_DOERR = (3 << 0),
+};
+
+static inline void mite_dma_reset(struct mite_channel *mite_chan)
+{
+	writel(CHOR_DMARESET | CHOR_FRESET,
+		mite_chan->mite->mite_io_addr + MITE_CHOR(mite_chan->channel));
+};
+
+#endif /* !__ANALOGY_NI_MITE_H__ */
+++ linux-patched/drivers/xenomai/analogy/national_instruments/ni_660x.c	2022-03-21 12:58:31.108872199 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/drivers/xenomai/analogy/national_instruments/ni_stc.h	1970-01-01 01:00:00.000000000 +0100
+/*
+ * comedi/drivers/ni_660x.c
+ * Hardware driver for NI 660x devices
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Xenomai; if not, write to the Free Software Foundation,
+ * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+/*
+ * Driver: ni_660x
+ * Description: National Instruments 660x counter/timer boards
+ * Devices:
+ * [National Instruments] PCI-6601 (ni_660x), PCI-6602, PXI-6602,
+ * PXI-6608
+ * Author: J.P. Mellor <jpmellor@rose-hulman.edu>,
+ * Herman.Bruyninckx@mech.kuleuven.ac.be,
+ * Wim.Meeussen@mech.kuleuven.ac.be,
+ * Klaas.Gadeyne@mech.kuleuven.ac.be,
+ * Frank Mori Hess <fmhess@users.sourceforge.net>
+ * Updated: Thu Oct 18 12:56:06 EDT 2007
+ * Status: experimental
+ *
+ * Encoders work.  PulseGeneration (both single pulse and pulse train)
+ * works. Buffered commands work for input but not output.
+ *
+ * References:
+ * DAQ 660x Register-Level Programmer Manual  (NI 370505A-01)
+ * DAQ 6601/6602 User Manual (NI 322137B-01)
+ */
+
+/*
+ * Integration with Xenomai/Analogy layer based on the
+ * comedi driver. Adaptation made by
+ *   Julien Delange <julien.delange@esa.int>
+ */
+
+#include <linux/interrupt.h>
+
+#include <linux/module.h>
+#include <rtdm/analogy/device.h>
+
+#include "../intel/8255.h"
+#include "ni_stc.h"
+#include "ni_mio.h"
+#include "ni_tio.h"
+#include "mite.h"
+
+enum io_direction {
+	DIRECTION_INPUT = 0,
+	DIRECTION_OUTPUT = 1,
+	DIRECTION_OPENDRAIN = 2
+};
+
+
+enum ni_660x_constants {
+	min_counter_pfi_chan = 8,
+	max_dio_pfi_chan = 31,
+	counters_per_chip = 4
+};
+
+struct ni_660x_subd_priv {
+	int io_bits;
+	unsigned int state;
+	uint16_t readback[2];
+	uint16_t config;
+	struct ni_gpct *counter;
+};
+
+#define NUM_PFI_CHANNELS 40
+/* Really there are only up to 3 dma channels, but the register layout
+   allows for 4 */
+#define MAX_DMA_CHANNEL 4
+
+static struct a4l_channels_desc chandesc_ni660x = {
+	.mode = A4L_CHAN_GLOBAL_CHANDESC,
+	.length = NUM_PFI_CHANNELS,
+	.chans = {
+		{A4L_CHAN_AREF_GROUND, sizeof(sampl_t)},
+	},
+};
+
+#define subdev_priv ((struct ni_660x_subd_priv*)s->priv)
+
+/* See Register-Level Programmer Manual page 3.1 */
+enum NI_660x_Register {
+	G0InterruptAcknowledge,
+	G0StatusRegister,
+	G1InterruptAcknowledge,
+	G1StatusRegister,
+	G01StatusRegister,
+	G0CommandRegister,
+	STCDIOParallelInput,
+	G1CommandRegister,
+	G0HWSaveRegister,
+	G1HWSaveRegister,
+	STCDIOOutput,
+	STCDIOControl,
+	G0SWSaveRegister,
+	G1SWSaveRegister,
+	G0ModeRegister,
+	G01JointStatus1Register,
+	G1ModeRegister,
+	STCDIOSerialInput,
+	G0LoadARegister,
+	G01JointStatus2Register,
+	G0LoadBRegister,
+	G1LoadARegister,
+	G1LoadBRegister,
+	G0InputSelectRegister,
+	G1InputSelectRegister,
+	G0AutoincrementRegister,
+	G1AutoincrementRegister,
+	G01JointResetRegister,
+	G0InterruptEnable,
+	G1InterruptEnable,
+	G0CountingModeRegister,
+	G1CountingModeRegister,
+	G0SecondGateRegister,
+	G1SecondGateRegister,
+	G0DMAConfigRegister,
+	G0DMAStatusRegister,
+	G1DMAConfigRegister,
+	G1DMAStatusRegister,
+	G2InterruptAcknowledge,
+	G2StatusRegister,
+	G3InterruptAcknowledge,
+	G3StatusRegister,
+	G23StatusRegister,
+	G2CommandRegister,
+	G3CommandRegister,
+	G2HWSaveRegister,
+	G3HWSaveRegister,
+	G2SWSaveRegister,
+	G3SWSaveRegister,
+	G2ModeRegister,
+	G23JointStatus1Register,
+	G3ModeRegister,
+	G2LoadARegister,
+	G23JointStatus2Register,
+	G2LoadBRegister,
+	G3LoadARegister,
+	G3LoadBRegister,
+	G2InputSelectRegister,
+	G3InputSelectRegister,
+	G2AutoincrementRegister,
+	G3AutoincrementRegister,
+	G23JointResetRegister,
+	G2InterruptEnable,
+	G3InterruptEnable,
+	G2CountingModeRegister,
+	G3CountingModeRegister,
+	G3SecondGateRegister,
+	G2SecondGateRegister,
+	G2DMAConfigRegister,
+	G2DMAStatusRegister,
+	G3DMAConfigRegister,
+	G3DMAStatusRegister,
+	DIO32Input,
+	DIO32Output,
+	ClockConfigRegister,
+	GlobalInterruptStatusRegister,
+	DMAConfigRegister,
+	GlobalInterruptConfigRegister,
+	IOConfigReg0_1,
+	IOConfigReg2_3,
+	IOConfigReg4_5,
+	IOConfigReg6_7,
+	IOConfigReg8_9,
+	IOConfigReg10_11,
+	IOConfigReg12_13,
+	IOConfigReg14_15,
+	IOConfigReg16_17,
+	IOConfigReg18_19,
+	IOConfigReg20_21,
+	IOConfigReg22_23,
+	IOConfigReg24_25,
+	IOConfigReg26_27,
+	IOConfigReg28_29,
+	IOConfigReg30_31,
+	IOConfigReg32_33,
+	IOConfigReg34_35,
+	IOConfigReg36_37,
+	IOConfigReg38_39,
+	NumRegisters,
+};
+
+static inline unsigned IOConfigReg(unsigned pfi_channel)
+{
+	unsigned reg = IOConfigReg0_1 + pfi_channel / 2;
+	BUG_ON(reg > IOConfigReg38_39);
+	return reg;
+}
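+
+/*
+ * Example: PFI channel 5 shares IO Config Register 4-5 with channel 4,
+ * i.e. IOConfigReg(5) = IOConfigReg0_1 + 2 = IOConfigReg4_5 (offset
+ * 0x780 in registerData[] below).
+ */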
+
+enum ni_660x_register_width {
+	DATA_1B,
+	DATA_2B,
+	DATA_4B
+};
+
+enum ni_660x_register_direction {
+	NI_660x_READ,
+	NI_660x_WRITE,
+	NI_660x_READ_WRITE
+};
+
+enum ni_660x_pfi_output_select {
+	pfi_output_select_high_Z = 0,
+	pfi_output_select_counter = 1,
+	pfi_output_select_do = 2,
+	num_pfi_output_selects
+};
+
+enum ni_660x_subdevices {
+	NI_660X_DIO_SUBDEV = 1,
+	NI_660X_GPCT_SUBDEV_0 = 2
+};
+
+static inline unsigned NI_660X_GPCT_SUBDEV(unsigned index)
+{
+	return NI_660X_GPCT_SUBDEV_0 + index;
+}
+
+struct NI_660xRegisterData {
+
+	const char *name; /*  Register Name */
+	int offset; /*  Offset from base address from GPCT chip */
+	enum ni_660x_register_direction direction;
+	enum ni_660x_register_width size; /*  1 byte, 2 bytes, or 4 bytes */
+};
+
+static const struct NI_660xRegisterData registerData[NumRegisters] = {
+	{"G0 Interrupt Acknowledge", 0x004, NI_660x_WRITE, DATA_2B},
+	{"G0 Status Register", 0x004, NI_660x_READ, DATA_2B},
+	{"G1 Interrupt Acknowledge", 0x006, NI_660x_WRITE, DATA_2B},
+	{"G1 Status Register", 0x006, NI_660x_READ, DATA_2B},
+	{"G01 Status Register ", 0x008, NI_660x_READ, DATA_2B},
+	{"G0 Command Register", 0x00C, NI_660x_WRITE, DATA_2B},
+	{"STC DIO Parallel Input", 0x00E, NI_660x_READ, DATA_2B},
+	{"G1 Command Register", 0x00E, NI_660x_WRITE, DATA_2B},
+	{"G0 HW Save Register", 0x010, NI_660x_READ, DATA_4B},
+	{"G1 HW Save Register", 0x014, NI_660x_READ, DATA_4B},
+	{"STC DIO Output", 0x014, NI_660x_WRITE, DATA_2B},
+	{"STC DIO Control", 0x016, NI_660x_WRITE, DATA_2B},
+	{"G0 SW Save Register", 0x018, NI_660x_READ, DATA_4B},
+	{"G1 SW Save Register", 0x01C, NI_660x_READ, DATA_4B},
+	{"G0 Mode Register", 0x034, NI_660x_WRITE, DATA_2B},
+	{"G01 Joint Status 1 Register", 0x036, NI_660x_READ, DATA_2B},
+	{"G1 Mode Register", 0x036, NI_660x_WRITE, DATA_2B},
+	{"STC DIO Serial Input", 0x038, NI_660x_READ, DATA_2B},
+	{"G0 Load A Register", 0x038, NI_660x_WRITE, DATA_4B},
+	{"G01 Joint Status 2 Register", 0x03A, NI_660x_READ, DATA_2B},
+	{"G0 Load B Register", 0x03C, NI_660x_WRITE, DATA_4B},
+	{"G1 Load A Register", 0x040, NI_660x_WRITE, DATA_4B},
+	{"G1 Load B Register", 0x044, NI_660x_WRITE, DATA_4B},
+	{"G0 Input Select Register", 0x048, NI_660x_WRITE, DATA_2B},
+	{"G1 Input Select Register", 0x04A, NI_660x_WRITE, DATA_2B},
+	{"G0 Autoincrement Register", 0x088, NI_660x_WRITE, DATA_2B},
+	{"G1 Autoincrement Register", 0x08A, NI_660x_WRITE, DATA_2B},
+	{"G01 Joint Reset Register", 0x090, NI_660x_WRITE, DATA_2B},
+	{"G0 Interrupt Enable", 0x092, NI_660x_WRITE, DATA_2B},
+	{"G1 Interrupt Enable", 0x096, NI_660x_WRITE, DATA_2B},
+	{"G0 Counting Mode Register", 0x0B0, NI_660x_WRITE, DATA_2B},
+	{"G1 Counting Mode Register", 0x0B2, NI_660x_WRITE, DATA_2B},
+	{"G0 Second Gate Register", 0x0B4, NI_660x_WRITE, DATA_2B},
+	{"G1 Second Gate Register", 0x0B6, NI_660x_WRITE, DATA_2B},
+	{"G0 DMA Config Register", 0x0B8, NI_660x_WRITE, DATA_2B},
+	{"G0 DMA Status Register", 0x0B8, NI_660x_READ, DATA_2B},
+	{"G1 DMA Config Register", 0x0BA, NI_660x_WRITE, DATA_2B},
+	{"G1 DMA Status Register", 0x0BA, NI_660x_READ, DATA_2B},
+	{"G2 Interrupt Acknowledge", 0x104, NI_660x_WRITE, DATA_2B},
+	{"G2 Status Register", 0x104, NI_660x_READ, DATA_2B},
+	{"G3 Interrupt Acknowledge", 0x106, NI_660x_WRITE, DATA_2B},
+	{"G3 Status Register", 0x106, NI_660x_READ, DATA_2B},
+	{"G23 Status Register", 0x108, NI_660x_READ, DATA_2B},
+	{"G2 Command Register", 0x10C, NI_660x_WRITE, DATA_2B},
+	{"G3 Command Register", 0x10E, NI_660x_WRITE, DATA_2B},
+	{"G2 HW Save Register", 0x110, NI_660x_READ, DATA_4B},
+	{"G3 HW Save Register", 0x114, NI_660x_READ, DATA_4B},
+	{"G2 SW Save Register", 0x118, NI_660x_READ, DATA_4B},
+	{"G3 SW Save Register", 0x11C, NI_660x_READ, DATA_4B},
+	{"G2 Mode Register", 0x134, NI_660x_WRITE, DATA_2B},
+	{"G23 Joint Status 1 Register", 0x136, NI_660x_READ, DATA_2B},
+	{"G3 Mode Register", 0x136, NI_660x_WRITE, DATA_2B},
+	{"G2 Load A Register", 0x138, NI_660x_WRITE, DATA_4B},
+	{"G23 Joint Status 2 Register", 0x13A, NI_660x_READ, DATA_2B},
+	{"G2 Load B Register", 0x13C, NI_660x_WRITE, DATA_4B},
+	{"G3 Load A Register", 0x140, NI_660x_WRITE, DATA_4B},
+	{"G3 Load B Register", 0x144, NI_660x_WRITE, DATA_4B},
+	{"G2 Input Select Register", 0x148, NI_660x_WRITE, DATA_2B},
+	{"G3 Input Select Register", 0x14A, NI_660x_WRITE, DATA_2B},
+	{"G2 Autoincrement Register", 0x188, NI_660x_WRITE, DATA_2B},
+	{"G3 Autoincrement Register", 0x18A, NI_660x_WRITE, DATA_2B},
+	{"G23 Joint Reset Register", 0x190, NI_660x_WRITE, DATA_2B},
+	{"G2 Interrupt Enable", 0x192, NI_660x_WRITE, DATA_2B},
+	{"G3 Interrupt Enable", 0x196, NI_660x_WRITE, DATA_2B},
+	{"G2 Counting Mode Register", 0x1B0, NI_660x_WRITE, DATA_2B},
+	{"G3 Counting Mode Register", 0x1B2, NI_660x_WRITE, DATA_2B},
+	{"G3 Second Gate Register", 0x1B6, NI_660x_WRITE, DATA_2B},
+	{"G2 Second Gate Register", 0x1B4, NI_660x_WRITE, DATA_2B},
+	{"G2 DMA Config Register", 0x1B8, NI_660x_WRITE, DATA_2B},
+	{"G2 DMA Status Register", 0x1B8, NI_660x_READ, DATA_2B},
+	{"G3 DMA Config Register", 0x1BA, NI_660x_WRITE, DATA_2B},
+	{"G3 DMA Status Register", 0x1BA, NI_660x_READ, DATA_2B},
+	{"32 bit Digital Input", 0x414, NI_660x_READ, DATA_4B},
+	{"32 bit Digital Output", 0x510, NI_660x_WRITE, DATA_4B},
+	{"Clock Config Register", 0x73C, NI_660x_WRITE, DATA_4B},
+	{"Global Interrupt Status Register", 0x754, NI_660x_READ, DATA_4B},
+	{"DMA Configuration Register", 0x76C, NI_660x_WRITE, DATA_4B},
+	{"Global Interrupt Config Register", 0x770, NI_660x_WRITE, DATA_4B},
+	{"IO Config Register 0-1", 0x77C, NI_660x_READ_WRITE, DATA_2B},
+	{"IO Config Register 2-3", 0x77E, NI_660x_READ_WRITE, DATA_2B},
+	{"IO Config Register 4-5", 0x780, NI_660x_READ_WRITE, DATA_2B},
+	{"IO Config Register 6-7", 0x782, NI_660x_READ_WRITE, DATA_2B},
+	{"IO Config Register 8-9", 0x784, NI_660x_READ_WRITE, DATA_2B},
+	{"IO Config Register 10-11", 0x786, NI_660x_READ_WRITE, DATA_2B},
+	{"IO Config Register 12-13", 0x788, NI_660x_READ_WRITE, DATA_2B},
+	{"IO Config Register 14-15", 0x78A, NI_660x_READ_WRITE, DATA_2B},
+	{"IO Config Register 16-17", 0x78C, NI_660x_READ_WRITE, DATA_2B},
+	{"IO Config Register 18-19", 0x78E, NI_660x_READ_WRITE, DATA_2B},
+	{"IO Config Register 20-21", 0x790, NI_660x_READ_WRITE, DATA_2B},
+	{"IO Config Register 22-23", 0x792, NI_660x_READ_WRITE, DATA_2B},
+	{"IO Config Register 24-25", 0x794, NI_660x_READ_WRITE, DATA_2B},
+	{"IO Config Register 26-27", 0x796, NI_660x_READ_WRITE, DATA_2B},
+	{"IO Config Register 28-29", 0x798, NI_660x_READ_WRITE, DATA_2B},
+	{"IO Config Register 30-31", 0x79A, NI_660x_READ_WRITE, DATA_2B},
+	{"IO Config Register 32-33", 0x79C, NI_660x_READ_WRITE, DATA_2B},
+	{"IO Config Register 34-35", 0x79E, NI_660x_READ_WRITE, DATA_2B},
+	{"IO Config Register 36-37", 0x7A0, NI_660x_READ_WRITE, DATA_2B},
+	{"IO Config Register 38-39", 0x7A2, NI_660x_READ_WRITE, DATA_2B}
+};
+
+/* Clock Config Register bit: CounterSwap must be set on the second TIO
+   chip so that its counters do not share pins with the first chip */
+enum clock_config_register_bits {
+	CounterSwap = 0x1 << 21
+};
+
+/* IO Config Register helpers: each register configures two PFI channels */
+static inline unsigned ioconfig_bitshift(unsigned pfi_channel)
+{
+	if (pfi_channel % 2)
+		return 0;
+	else
+		return 8;
+}
+
+static inline unsigned pfi_output_select_mask(unsigned pfi_channel)
+{
+	return 0x3 << ioconfig_bitshift(pfi_channel);
+}
+
+static inline unsigned pfi_output_select_bits(unsigned pfi_channel,
+					      unsigned output_select)
+{
+	return (output_select & 0x3) << ioconfig_bitshift(pfi_channel);
+}
+
+static inline unsigned pfi_input_select_mask(unsigned pfi_channel)
+{
+	return 0x7 << (4 + ioconfig_bitshift(pfi_channel));
+}
+
+static inline unsigned pfi_input_select_bits(unsigned pfi_channel,
+					     unsigned input_select)
+{
+	return (input_select & 0x7) << (4 + ioconfig_bitshift(pfi_channel));
+}
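+
+/*
+ * Worked example of the helpers above: PFI channel 2 is even, so
+ * ioconfig_bitshift(2) = 8; its output select mask is 0x3 << 8 = 0x300,
+ * pfi_output_select_bits(2, pfi_output_select_counter) = 0x100, and
+ * its input select mask is 0x7 << 12 = 0x7000.
+ */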
+
+/* Dma configuration register bits */
+static inline unsigned dma_select_mask(unsigned dma_channel)
+{
+	BUG_ON(dma_channel >= MAX_DMA_CHANNEL);
+	return 0x1f << (8 * dma_channel);
+}
+
+enum dma_selection {
+	dma_selection_none = 0x1f,
+};
+
+static inline unsigned dma_selection_counter(unsigned counter_index)
+{
+	BUG_ON(counter_index >= counters_per_chip);
+	return counter_index;
+}
+
+static inline unsigned dma_select_bits(unsigned dma_channel, unsigned selection)
+{
+	BUG_ON(dma_channel >= MAX_DMA_CHANNEL);
+	return (selection << (8 * dma_channel)) & dma_select_mask(dma_channel);
+}
+
+static inline unsigned dma_reset_bit(unsigned dma_channel)
+{
+	BUG_ON(dma_channel >= MAX_DMA_CHANNEL);
+	return 0x80 << (8 * dma_channel);
+}
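+
+/*
+ * Worked example: for mite DMA channel 1, dma_select_mask(1) = 0x1f00,
+ * routing counter 2 to it gives
+ * dma_select_bits(1, dma_selection_counter(2)) = 0x200, and
+ * dma_reset_bit(1) = 0x8000.
+ */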
+
+enum global_interrupt_status_register_bits {
+	Counter_0_Int_Bit = 0x100,
+	Counter_1_Int_Bit = 0x200,
+	Counter_2_Int_Bit = 0x400,
+	Counter_3_Int_Bit = 0x800,
+	Cascade_Int_Bit = 0x20000000,
+	Global_Int_Bit = 0x80000000
+};
+
+enum global_interrupt_config_register_bits {
+	Cascade_Int_Enable_Bit = 0x20000000,
+	Global_Int_Polarity_Bit = 0x40000000,
+	Global_Int_Enable_Bit = 0x80000000
+};
+
+/* Offset of the GPCT chips from the base-address of the card:
+   First chip is at base-address + 0x00, etc. */
+static const unsigned GPCT_OFFSET[2] = { 0x0, 0x800 };
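+
+/*
+ * Example: with the table above, the "G0 Status Register" (offset
+ * 0x004 in registerData[]) of the second TIO chip is read at
+ * daq_io_addr + GPCT_OFFSET[1] + 0x004 = daq_io_addr + 0x804, as done
+ * in ni_660x_read_register() below.
+ */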
+
+/* Board description */
+struct ni_660x_board {
+	unsigned short dev_id;	/* `lspci` will show you this */
+	const char *name;
+	unsigned n_chips;	/* total number of TIO chips */
+};
+
+static const struct ni_660x_board ni_660x_boards[] = {
+	{
+	 .dev_id = 0x2c60,
+	 .name = "PCI-6601",
+	 .n_chips = 1,
+	 },
+	{
+	 .dev_id = 0x1310,
+	 .name = "PCI-6602",
+	 .n_chips = 2,
+	 },
+	{
+	 .dev_id = 0x1360,
+	 .name = "PXI-6602",
+	 .n_chips = 2,
+	 },
+	{
+	 .dev_id = 0x2cc0,
+	 .name = "PXI-6608",
+	 .n_chips = 2,
+	 },
+};
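+
+/*
+ * Example: with the vendor ID PCI_VENDOR_ID_NATINST (0x1093) defined in
+ * mite.h, a PCI-6602 from the table above shows up in `lspci -n` as
+ * device 1093:1310.
+ */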
+
+#define NI_660X_MAX_NUM_CHIPS 2
+#define NI_660X_MAX_NUM_COUNTERS (NI_660X_MAX_NUM_CHIPS * counters_per_chip)
+
+static const struct pci_device_id ni_660x_pci_table[] = {
+	{
+	PCI_VENDOR_ID_NATINST, 0x2c60, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, {
+	PCI_VENDOR_ID_NATINST, 0x1310, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, {
+	PCI_VENDOR_ID_NATINST, 0x1360, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, {
+	PCI_VENDOR_ID_NATINST, 0x2cc0, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, {
+	0}
+};
+
+MODULE_DEVICE_TABLE(pci, ni_660x_pci_table);
+
+struct ni_660x_private {
+	struct mite_struct *mite;
+	struct ni_gpct_device *counter_dev;
+	uint64_t pfi_direction_bits;
+
+	struct mite_dma_descriptor_ring
+	  *mite_rings[NI_660X_MAX_NUM_CHIPS][counters_per_chip];
+
+	rtdm_lock_t mite_channel_lock;
+	/* Interrupt_lock prevents races between interrupt and
+	   comedi_poll */
+	rtdm_lock_t interrupt_lock;
+	unsigned int dma_configuration_soft_copies[NI_660X_MAX_NUM_CHIPS];
+	rtdm_lock_t soft_reg_copy_lock;
+	unsigned short pfi_output_selects[NUM_PFI_CHANNELS];
+
+	struct ni_660x_board *board_ptr;
+};
+
+#undef devpriv
+#define devpriv ((struct ni_660x_private *)dev->priv)
+
+static inline struct ni_660x_private *private(struct a4l_device *dev)
+{
+	return (struct ni_660x_private*) dev->priv;
+}
+
+/* Initialized in ni_660x_attach() */
+static inline const struct ni_660x_board *board(struct a4l_device *dev)
+{
+	return ((struct ni_660x_private*)dev->priv)->board_ptr;
+}
+
+#define n_ni_660x_boards ARRAY_SIZE(ni_660x_boards)
+
+static int ni_660x_attach(struct a4l_device *dev,
+					 a4l_lnkdesc_t *arg);
+static int ni_660x_detach(struct a4l_device *dev);
+static void init_tio_chip(struct a4l_device *dev, int chipset);
+static void ni_660x_select_pfi_output(struct a4l_device *dev,
+				      unsigned pfi_channel,
+				      unsigned output_select);
+
+static struct a4l_driver ni_660x_drv = {
+	.board_name = "analogy_ni_660x",
+	.driver_name = "ni_660x",
+	.owner = THIS_MODULE,
+	.attach = ni_660x_attach,
+	.detach = ni_660x_detach,
+	.privdata_size = sizeof(struct ni_660x_private),
+};
+
+static int ni_660x_set_pfi_routing(struct a4l_device *dev, unsigned chan,
+				   unsigned source);
+
+/* Possible instructions for a GPCT */
+static int ni_660x_GPCT_rinsn(
+			      struct a4l_subdevice *s,
+			      struct a4l_kernel_instruction *insn);
+static int ni_660x_GPCT_insn_config(
+				    struct a4l_subdevice *s,
+				    struct a4l_kernel_instruction *insn);
+static int ni_660x_GPCT_winsn(
+			      struct a4l_subdevice *s,
+			      struct a4l_kernel_instruction *insn);
+
+/* Possible instructions for Digital IO */
+static int ni_660x_dio_insn_config(
+	       struct a4l_subdevice *s,
+	       struct a4l_kernel_instruction *insn);
+static int ni_660x_dio_insn_bits(
+	     struct a4l_subdevice *s,
+	     struct a4l_kernel_instruction *insn);
+
+static inline unsigned ni_660x_num_counters(struct a4l_device *dev)
+{
+	return board(dev)->n_chips * counters_per_chip;
+}
+
+static enum NI_660x_Register ni_gpct_to_660x_register(enum ni_gpct_register reg)
+{
+
+	enum NI_660x_Register ni_660x_register;
+	switch (reg) {
+	case NITIO_G0_Autoincrement_Reg:
+		ni_660x_register = G0AutoincrementRegister;
+		break;
+	case NITIO_G1_Autoincrement_Reg:
+		ni_660x_register = G1AutoincrementRegister;
+		break;
+	case NITIO_G2_Autoincrement_Reg:
+		ni_660x_register = G2AutoincrementRegister;
+		break;
+	case NITIO_G3_Autoincrement_Reg:
+		ni_660x_register = G3AutoincrementRegister;
+		break;
+	case NITIO_G0_Command_Reg:
+		ni_660x_register = G0CommandRegister;
+		break;
+	case NITIO_G1_Command_Reg:
+		ni_660x_register = G1CommandRegister;
+		break;
+	case NITIO_G2_Command_Reg:
+		ni_660x_register = G2CommandRegister;
+		break;
+	case NITIO_G3_Command_Reg:
+		ni_660x_register = G3CommandRegister;
+		break;
+	case NITIO_G0_HW_Save_Reg:
+		ni_660x_register = G0HWSaveRegister;
+		break;
+	case NITIO_G1_HW_Save_Reg:
+		ni_660x_register = G1HWSaveRegister;
+		break;
+	case NITIO_G2_HW_Save_Reg:
+		ni_660x_register = G2HWSaveRegister;
+		break;
+	case NITIO_G3_HW_Save_Reg:
+		ni_660x_register = G3HWSaveRegister;
+		break;
+	case NITIO_G0_SW_Save_Reg:
+		ni_660x_register = G0SWSaveRegister;
+		break;
+	case NITIO_G1_SW_Save_Reg:
+		ni_660x_register = G1SWSaveRegister;
+		break;
+	case NITIO_G2_SW_Save_Reg:
+		ni_660x_register = G2SWSaveRegister;
+		break;
+	case NITIO_G3_SW_Save_Reg:
+		ni_660x_register = G3SWSaveRegister;
+		break;
+	case NITIO_G0_Mode_Reg:
+		ni_660x_register = G0ModeRegister;
+		break;
+	case NITIO_G1_Mode_Reg:
+		ni_660x_register = G1ModeRegister;
+		break;
+	case NITIO_G2_Mode_Reg:
+		ni_660x_register = G2ModeRegister;
+		break;
+	case NITIO_G3_Mode_Reg:
+		ni_660x_register = G3ModeRegister;
+		break;
+	case NITIO_G0_LoadA_Reg:
+		ni_660x_register = G0LoadARegister;
+		break;
+	case NITIO_G1_LoadA_Reg:
+		ni_660x_register = G1LoadARegister;
+		break;
+	case NITIO_G2_LoadA_Reg:
+		ni_660x_register = G2LoadARegister;
+		break;
+	case NITIO_G3_LoadA_Reg:
+		ni_660x_register = G3LoadARegister;
+		break;
+	case NITIO_G0_LoadB_Reg:
+		ni_660x_register = G0LoadBRegister;
+		break;
+	case NITIO_G1_LoadB_Reg:
+		ni_660x_register = G1LoadBRegister;
+		break;
+	case NITIO_G2_LoadB_Reg:
+		ni_660x_register = G2LoadBRegister;
+		break;
+	case NITIO_G3_LoadB_Reg:
+		ni_660x_register = G3LoadBRegister;
+		break;
+	case NITIO_G0_Input_Select_Reg:
+		ni_660x_register = G0InputSelectRegister;
+		break;
+	case NITIO_G1_Input_Select_Reg:
+		ni_660x_register = G1InputSelectRegister;
+		break;
+	case NITIO_G2_Input_Select_Reg:
+		ni_660x_register = G2InputSelectRegister;
+		break;
+	case NITIO_G3_Input_Select_Reg:
+		ni_660x_register = G3InputSelectRegister;
+		break;
+	case NITIO_G01_Status_Reg:
+		ni_660x_register = G01StatusRegister;
+		break;
+	case NITIO_G23_Status_Reg:
+		ni_660x_register = G23StatusRegister;
+		break;
+	case NITIO_G01_Joint_Reset_Reg:
+		ni_660x_register = G01JointResetRegister;
+		break;
+	case NITIO_G23_Joint_Reset_Reg:
+		ni_660x_register = G23JointResetRegister;
+		break;
+	case NITIO_G01_Joint_Status1_Reg:
+		ni_660x_register = G01JointStatus1Register;
+		break;
+	case NITIO_G23_Joint_Status1_Reg:
+		ni_660x_register = G23JointStatus1Register;
+		break;
+	case NITIO_G01_Joint_Status2_Reg:
+		ni_660x_register = G01JointStatus2Register;
+		break;
+	case NITIO_G23_Joint_Status2_Reg:
+		ni_660x_register = G23JointStatus2Register;
+		break;
+	case NITIO_G0_Counting_Mode_Reg:
+		ni_660x_register = G0CountingModeRegister;
+		break;
+	case NITIO_G1_Counting_Mode_Reg:
+		ni_660x_register = G1CountingModeRegister;
+		break;
+	case NITIO_G2_Counting_Mode_Reg:
+		ni_660x_register = G2CountingModeRegister;
+		break;
+	case NITIO_G3_Counting_Mode_Reg:
+		ni_660x_register = G3CountingModeRegister;
+		break;
+	case NITIO_G0_Second_Gate_Reg:
+		ni_660x_register = G0SecondGateRegister;
+		break;
+	case NITIO_G1_Second_Gate_Reg:
+		ni_660x_register = G1SecondGateRegister;
+		break;
+	case NITIO_G2_Second_Gate_Reg:
+		ni_660x_register = G2SecondGateRegister;
+		break;
+	case NITIO_G3_Second_Gate_Reg:
+		ni_660x_register = G3SecondGateRegister;
+		break;
+	case NITIO_G0_DMA_Config_Reg:
+		ni_660x_register = G0DMAConfigRegister;
+		break;
+	case NITIO_G0_DMA_Status_Reg:
+		ni_660x_register = G0DMAStatusRegister;
+		break;
+	case NITIO_G1_DMA_Config_Reg:
+		ni_660x_register = G1DMAConfigRegister;
+		break;
+	case NITIO_G1_DMA_Status_Reg:
+		ni_660x_register = G1DMAStatusRegister;
+		break;
+	case NITIO_G2_DMA_Config_Reg:
+		ni_660x_register = G2DMAConfigRegister;
+		break;
+	case NITIO_G2_DMA_Status_Reg:
+		ni_660x_register = G2DMAStatusRegister;
+		break;
+	case NITIO_G3_DMA_Config_Reg:
+		ni_660x_register = G3DMAConfigRegister;
+		break;
+	case NITIO_G3_DMA_Status_Reg:
+		ni_660x_register = G3DMAStatusRegister;
+		break;
+	case NITIO_G0_Interrupt_Acknowledge_Reg:
+		ni_660x_register = G0InterruptAcknowledge;
+		break;
+	case NITIO_G1_Interrupt_Acknowledge_Reg:
+		ni_660x_register = G1InterruptAcknowledge;
+		break;
+	case NITIO_G2_Interrupt_Acknowledge_Reg:
+		ni_660x_register = G2InterruptAcknowledge;
+		break;
+	case NITIO_G3_Interrupt_Acknowledge_Reg:
+		ni_660x_register = G3InterruptAcknowledge;
+		break;
+	case NITIO_G0_Status_Reg:
+		ni_660x_register = G0StatusRegister;
+		break;
+	case NITIO_G1_Status_Reg:
+		ni_660x_register = G1StatusRegister;
+		break;
+	case NITIO_G2_Status_Reg:
+		ni_660x_register = G2StatusRegister;
+		break;
+	case NITIO_G3_Status_Reg:
+		ni_660x_register = G3StatusRegister;
+		break;
+	case NITIO_G0_Interrupt_Enable_Reg:
+		ni_660x_register = G0InterruptEnable;
+		break;
+	case NITIO_G1_Interrupt_Enable_Reg:
+		ni_660x_register = G1InterruptEnable;
+		break;
+	case NITIO_G2_Interrupt_Enable_Reg:
+		ni_660x_register = G2InterruptEnable;
+		break;
+	case NITIO_G3_Interrupt_Enable_Reg:
+		ni_660x_register = G3InterruptEnable;
+		break;
+	default:
+		__a4l_err("%s: unhandled register 0x%x in switch.\n",
+			  __FUNCTION__, reg);
+		BUG();
+		return 0;
+		break;
+	}
+	return ni_660x_register;
+}
+
+static inline void ni_660x_write_register(struct a4l_device *dev,
+					  unsigned chip_index, unsigned bits,
+					  enum NI_660x_Register reg)
+{
+	void *const write_address =
+	    private(dev)->mite->daq_io_addr + GPCT_OFFSET[chip_index] +
+	    registerData[reg].offset;
+
+	switch (registerData[reg].size) {
+	case DATA_2B:
+		writew(bits, write_address);
+		break;
+	case DATA_4B:
+		writel(bits, write_address);
+		break;
+	default:
+		__a4l_err("%s: %s: bug! unhandled case (reg=0x%x) in switch.\n",
+			  __FILE__, __FUNCTION__, reg);
+		BUG();
+		break;
+	}
+}
+
+static inline unsigned ni_660x_read_register(struct a4l_device *dev,
+					     unsigned chip_index,
+					     enum NI_660x_Register reg)
+{
+	void *const read_address =
+	    private(dev)->mite->daq_io_addr + GPCT_OFFSET[chip_index] +
+	    registerData[reg].offset;
+
+	switch (registerData[reg].size) {
+	case DATA_2B:
+		return readw(read_address);
+		break;
+	case DATA_4B:
+		return readl(read_address);
+		break;
+	default:
+		__a4l_err("%s: %s: bug! unhandled case (reg=0x%x) in switch.\n",
+			  __FILE__, __FUNCTION__, reg);
+		BUG();
+		break;
+	}
+	return 0;
+}
+
+static void ni_gpct_write_register(struct ni_gpct *counter,
+				   unsigned int bits, enum ni_gpct_register reg)
+{
+	struct a4l_device *dev = counter->counter_dev->dev;
+	enum NI_660x_Register ni_660x_register = ni_gpct_to_660x_register(reg);
+
+	ni_660x_write_register(dev, counter->chip_index, bits,
+			       ni_660x_register);
+}
+
+static unsigned ni_gpct_read_register(struct ni_gpct *counter,
+				      enum ni_gpct_register reg)
+{
+	struct a4l_device *dev = counter->counter_dev->dev;
+	enum NI_660x_Register ni_660x_register = ni_gpct_to_660x_register(reg);
+
+	return ni_660x_read_register(dev, counter->chip_index,
+				     ni_660x_register);
+}
+
+static inline
+struct mite_dma_descriptor_ring *mite_ring(struct ni_660x_private *priv,
+					   struct ni_gpct *counter)
+{
+
+	return priv->mite_rings[counter->chip_index][counter->counter_index];
+}
+
+static inline
+void ni_660x_set_dma_channel(struct a4l_device *dev,
+			     unsigned int mite_channel, struct ni_gpct *counter)
+{
+	unsigned long flags;
+
+	rtdm_lock_get_irqsave(&private(dev)->soft_reg_copy_lock, flags);
+	private(dev)->dma_configuration_soft_copies[counter->chip_index] &=
+	    ~dma_select_mask(mite_channel);
+	private(dev)->dma_configuration_soft_copies[counter->chip_index] |=
+	    dma_select_bits(mite_channel,
+			    dma_selection_counter(counter->counter_index));
+	ni_660x_write_register(dev, counter->chip_index,
+			       private(dev)->
+			       dma_configuration_soft_copies
+			       [counter->chip_index] |
+			       dma_reset_bit(mite_channel), DMAConfigRegister);
+	mmiowb();
+	rtdm_lock_put_irqrestore(&private(dev)->soft_reg_copy_lock, flags);
+}
+
+static inline
+void ni_660x_unset_dma_channel(struct a4l_device *dev,
+			       unsigned int mite_channel,
+			       struct ni_gpct *counter)
+{
+	unsigned long flags;
+	rtdm_lock_get_irqsave(&private(dev)->soft_reg_copy_lock, flags);
+	private(dev)->dma_configuration_soft_copies[counter->chip_index] &=
+	    ~dma_select_mask(mite_channel);
+	private(dev)->dma_configuration_soft_copies[counter->chip_index] |=
+	    dma_select_bits(mite_channel, dma_selection_none);
+	ni_660x_write_register(dev, counter->chip_index,
+			       private(dev)->
+			       dma_configuration_soft_copies
+			       [counter->chip_index], DMAConfigRegister);
+	mmiowb();
+	rtdm_lock_put_irqrestore(&private(dev)->soft_reg_copy_lock, flags);
+}
+
+static int ni_660x_request_mite_channel(struct a4l_device *dev,
+					struct ni_gpct *counter,
+					enum io_direction direction)
+{
+	unsigned long flags;
+	struct mite_channel *mite_chan;
+
+	rtdm_lock_get_irqsave(&private(dev)->mite_channel_lock, flags);
+	BUG_ON(counter->mite_chan);
+	mite_chan = mite_request_channel(private(dev)->mite,
+					 mite_ring(private(dev), counter));
+	if (mite_chan == NULL) {
+		rtdm_lock_put_irqrestore(&private(dev)->mite_channel_lock, flags);
+		a4l_err(dev,
+			"%s: failed to reserve mite dma channel for counter.\n",
+			__FUNCTION__);
+		return -EBUSY;
+	}
+	mite_chan->dir = direction;
+	a4l_ni_tio_set_mite_channel(counter, mite_chan);
+	ni_660x_set_dma_channel(dev, mite_chan->channel, counter);
+	rtdm_lock_put_irqrestore(&private(dev)->mite_channel_lock, flags);
+	return 0;
+}
+
+void ni_660x_release_mite_channel(struct a4l_device *dev,
+				  struct ni_gpct *counter)
+{
+	unsigned long flags;
+
+	rtdm_lock_get_irqsave(&private(dev)->mite_channel_lock, flags);
+	if (counter->mite_chan) {
+		struct mite_channel *mite_chan = counter->mite_chan;
+
+		ni_660x_unset_dma_channel(dev, mite_chan->channel, counter);
+		a4l_ni_tio_set_mite_channel(counter, NULL);
+		a4l_mite_release_channel(mite_chan);
+	}
+	rtdm_lock_put_irqrestore(&private(dev)->mite_channel_lock, flags);
+}
+
+static int ni_660x_cmd(struct a4l_subdevice *s, struct a4l_cmd_desc* cmd)
+{
+	int retval;
+
+	struct ni_gpct *counter = subdev_priv->counter;
+
+	retval = ni_660x_request_mite_channel(s->dev, counter, A4L_INPUT);
+	if (retval) {
+		a4l_err(s->dev,
+			"%s: no dma channel available for use by counter",
+			__FUNCTION__);
+		return retval;
+	}
+
+	a4l_ni_tio_acknowledge_and_confirm (counter, NULL, NULL, NULL, NULL);
+	retval = a4l_ni_tio_cmd(counter, cmd);
+
+	return retval;
+}
+
+static int ni_660x_cmdtest(struct a4l_subdevice *s, struct a4l_cmd_desc *cmd)
+{
+	struct ni_gpct *counter = subdev_priv->counter;
+	return a4l_ni_tio_cmdtest(counter, cmd);
+}
+
+static int ni_660x_cancel(struct a4l_subdevice *s)
+{
+	struct ni_gpct *counter = subdev_priv->counter;
+	int retval;
+
+	retval = a4l_ni_tio_cancel(counter);
+	ni_660x_release_mite_channel(s->dev, counter);
+	return retval;
+}
+
+static void set_tio_counterswap(struct a4l_device *dev, int chipset)
+{
+	/* See P. 3.5 of the Register-Level Programming manual.  The
+	   CounterSwap bit has to be set on the second chip, otherwise
+	   it will try to use the same pins as the first chip.
+	 */
+
+	if (chipset)
+		ni_660x_write_register(dev,
+				       chipset,
+				       CounterSwap, ClockConfigRegister);
+	else
+		ni_660x_write_register(dev,
+				       chipset, 0, ClockConfigRegister);
+}
+
+static void ni_660x_handle_gpct_interrupt(struct a4l_device *dev,
+					  struct a4l_subdevice *s)
+{
+	struct a4l_buffer *buf = s->buf;
+
+	a4l_ni_tio_handle_interrupt(subdev_priv->counter, dev);
+	if (test_bit(A4L_BUF_EOA_NR, &buf->flags) &&
+	    test_bit(A4L_BUF_ERROR_NR, &buf->flags))
+		ni_660x_cancel(s);
+	else
+		a4l_buf_evt(s, 0);
+}
+
+static int ni_660x_interrupt(unsigned int irq, void *d)
+{
+	struct a4l_device *dev = d;
+	struct a4l_subdevice *s;
+	unsigned long flags;
+
+	/* Ignore interrupts until the device is fully attached */
+	if (!test_bit(A4L_DEV_ATTACHED_NR, &dev->flags))
+		return -ENOENT;
+
+	/* Lock to avoid races with the buffer read/write path */
+	rtdm_lock_get_irqsave(&private(dev)->interrupt_lock, flags);
+	smp_mb();
+
+	list_for_each_entry(s, &dev->subdvsq, list)
+		ni_660x_handle_gpct_interrupt(dev, s);
+
+	rtdm_lock_put_irqrestore(&private(dev)->interrupt_lock, flags);
+	return 0;
+}
+
+static int ni_660x_alloc_mite_rings(struct a4l_device *dev)
+{
+	unsigned int i;
+	unsigned int j;
+
+	for (i = 0; i < board(dev)->n_chips; ++i) {
+		for (j = 0; j < counters_per_chip; ++j) {
+			private(dev)->mite_rings[i][j] =
+				mite_alloc_ring(private(dev)->mite);
+			if (private(dev)->mite_rings[i][j] == NULL)
+				return -ENOMEM;
+		}
+	}
+
+	return 0;
+}
+
+static void ni_660x_free_mite_rings(struct a4l_device *dev)
+{
+	unsigned int i;
+	unsigned int j;
+
+	for (i = 0; i < board(dev)->n_chips; ++i)
+		for (j = 0; j < counters_per_chip; ++j)
+			mite_free_ring(private(dev)->mite_rings[i][j]);
+}
+
+
+static int __init driver_ni_660x_init_module(void)
+{
+	return a4l_register_drv (&ni_660x_drv);
+}
+
+static void __exit driver_ni_660x_cleanup_module(void)
+{
+	a4l_unregister_drv (&ni_660x_drv);
+}
+
+module_init(driver_ni_660x_init_module);
+module_exit(driver_ni_660x_cleanup_module);
+
+static int ni_660x_attach(struct a4l_device *dev, a4l_lnkdesc_t *arg)
+{
+	struct a4l_subdevice *s;
+	int ret;
+	int err;
+	int bus, slot;
+	unsigned i;
+	int nsubdev = 0;
+	unsigned global_interrupt_config_bits;
+	struct mite_struct *mitedev;
+	struct ni_660x_board* boardptr = NULL;
+
+	ret = 0;
+	bus = slot = 0;
+	mitedev = NULL;
+	nsubdev = 0;
+
+	if(arg->opts == NULL || arg->opts_size == 0)
+		bus = slot = 0;
+	else {
+		bus = arg->opts_size >= sizeof(unsigned long) ?
+			((unsigned long *)arg->opts)[0] : 0;
+		slot = arg->opts_size >= sizeof(unsigned long) * 2 ?
+			((unsigned long *)arg->opts)[1] : 0;
+	}
+
+	for (i = 0; ( i < n_ni_660x_boards ) && ( mitedev == NULL ); i++) {
+		mitedev  = a4l_mite_find_device(bus, slot,
+						ni_660x_boards[i].dev_id);
+		boardptr = (struct ni_660x_board*) &ni_660x_boards[i];
+	}
+
+
+	if(mitedev == NULL) {
+		a4l_info(dev, "mite device not found\n");
+		return -ENOENT;
+	}
+
+	a4l_info(dev, "board found (name=%s), continuing initialization...\n",
+		 boardptr->name);
+
+	private(dev)->mite      = mitedev;
+	private(dev)->board_ptr = boardptr;
+
+	rtdm_lock_init(&private(dev)->mite_channel_lock);
+	rtdm_lock_init(&private(dev)->interrupt_lock);
+	rtdm_lock_init(&private(dev)->soft_reg_copy_lock);
+	for (i = 0; i < NUM_PFI_CHANNELS; ++i) {
+		private(dev)->pfi_output_selects[i] = pfi_output_select_counter;
+	}
+
+	ret = a4l_mite_setup(private(dev)->mite, 1);
+	if (ret < 0) {
+		a4l_err(dev, "%s: error setting up mite\n", __FUNCTION__);
+		return ret;
+	}
+
+	ret = ni_660x_alloc_mite_rings(dev);
+	if (ret < 0) {
+		a4l_err(dev, "%s: error setting up mite rings\n", __FUNCTION__);
+		return ret;
+	}
+
+	/* Setup first subdevice */
+	s = a4l_alloc_subd(sizeof(struct ni_660x_subd_priv), NULL);
+	if (s == NULL)
+		return -ENOMEM;
+
+	s->flags = A4L_SUBD_UNUSED;
+
+	err = a4l_add_subd(dev, s);
+	if (err != nsubdev) {
+		a4l_info(dev, "cannot add first subdevice, a4l_add_subd() returned %d, expected %d\n", err, nsubdev);
+		return err;
+	}
+
+	nsubdev++;
+
+	/* Setup second subdevice */
+	s = a4l_alloc_subd(sizeof(struct ni_660x_subd_priv), NULL);
+	if (s == NULL) {
+		a4l_info(dev, "cannot allocate second subdevice\n");
+		return -ENOMEM;
+	}
+
+	s->flags          = A4L_SUBD_DIO;
+	s->flags         |= A4L_SUBD_CMD;
+	s->chan_desc      = &chandesc_ni660x;
+	s->rng_desc       = &range_digital;
+	s->insn_bits      = ni_660x_dio_insn_bits;
+	s->insn_config    = ni_660x_dio_insn_config;
+	s->dev            = dev;
+	subdev_priv->io_bits = 0;
+	ni_660x_write_register(dev, 0, 0, STCDIOControl);
+
+	err = a4l_add_subd(dev, s);
+	if (err != nsubdev)
+		return err;
+
+	nsubdev++;
+
+	private(dev)->counter_dev =
+		a4l_ni_gpct_device_construct(dev,
+					     &ni_gpct_write_register,
+					     &ni_gpct_read_register,
+					     ni_gpct_variant_660x,
+					     ni_660x_num_counters (dev));
+	if (private(dev)->counter_dev == NULL)
+		return -ENOMEM;
+
+	for (i = 0; i < ni_660x_num_counters(dev); ++i) {
+		/* TODO: check why kmalloc is used here... and in pcimio */
+		private(dev)->counter_dev->counters[i] =
+			kmalloc(sizeof(struct ni_gpct), GFP_KERNEL);
+		if (private(dev)->counter_dev->counters[i] == NULL)
+			return -ENOMEM;
+		private(dev)->counter_dev->counters[i]->counter_dev =
+			private(dev)->counter_dev;
+		rtdm_lock_init(&(private(dev)->counter_dev->counters[i]->lock));
+	}
+
+	for (i = 0; i < NI_660X_MAX_NUM_COUNTERS; ++i) {
+		if (i < ni_660x_num_counters(dev)) {
+			/* Setup other subdevice */
+			s = a4l_alloc_subd(sizeof(struct ni_660x_subd_priv), NULL);
+
+			if (s == NULL)
+				return -ENOMEM;
+
+			s->flags             = A4L_SUBD_COUNTER;
+			s->chan_desc         = rtdm_malloc (sizeof (struct a4l_channels_desc));
+			s->chan_desc->length = 3;
+			s->insn_read         = ni_660x_GPCT_rinsn;
+			s->insn_write        = ni_660x_GPCT_winsn;
+			s->insn_config       = ni_660x_GPCT_insn_config;
+			s->do_cmd            = &ni_660x_cmd;
+			s->do_cmdtest        = &ni_660x_cmdtest;
+			s->cancel            = &ni_660x_cancel;
+
+			subdev_priv->counter = private(dev)->counter_dev->counters[i];
+
+			private(dev)->counter_dev->counters[i]->chip_index =
+				i / counters_per_chip;
+			private(dev)->counter_dev->counters[i]->counter_index =
+				i % counters_per_chip;
+		} else {
+			s = a4l_alloc_subd(sizeof(struct ni_660x_subd_priv), NULL);
+			if (s == NULL)
+				return -ENOMEM;
+			s->flags = A4L_SUBD_UNUSED;
+		}
+
+		err = a4l_add_subd(dev, s);
+
+		if (err != nsubdev)
+			return err;
+
+		nsubdev++;
+	}
+
+	for (i = 0; i < board(dev)->n_chips; ++i)
+		init_tio_chip(dev, i);
+
+	for (i = 0; i < ni_660x_num_counters(dev); ++i)
+		a4l_ni_tio_init_counter(private(dev)->counter_dev->counters[i]);
+
+	for (i = 0; i < NUM_PFI_CHANNELS; ++i) {
+		if (i < min_counter_pfi_chan)
+			ni_660x_set_pfi_routing(dev, i, pfi_output_select_do);
+		else
+			ni_660x_set_pfi_routing(dev, i,
+						pfi_output_select_counter);
+		ni_660x_select_pfi_output(dev, i, pfi_output_select_high_Z);
+	}
+
+
+	/* To be safe, set counterswap bits on tio chips after all the
+	   counter outputs have been set to high impedance mode */
+
+	for (i = 0; i < board(dev)->n_chips; ++i)
+		set_tio_counterswap(dev, i);
+
+	ret = a4l_request_irq(dev,
+			      mite_irq(private(dev)->mite),
+			      ni_660x_interrupt, RTDM_IRQTYPE_SHARED, dev);
+
+	if (ret < 0) {
+		a4l_err(dev, "%s: IRQ not available\n", __FUNCTION__);
+		return ret;
+	}
+
+	global_interrupt_config_bits = Global_Int_Enable_Bit;
+	if (board(dev)->n_chips > 1)
+		global_interrupt_config_bits |= Cascade_Int_Enable_Bit;
+
+	ni_660x_write_register(dev, 0, global_interrupt_config_bits,
+			       GlobalInterruptConfigRegister);
+
+	a4l_info(dev, "attach succeeded, device is ready to be used\n");
+
+	return 0;
+}
+
+static int ni_660x_detach(struct a4l_device *dev)
+{
+	int i;
+
+	a4l_info(dev, "detaching the driver...\n");
+
+	/* Free irq */
+	if (a4l_get_irq(dev) != A4L_IRQ_UNUSED)
+		a4l_free_irq(dev, a4l_get_irq(dev));
+
+	if (dev->priv) {
+
+		if (private(dev)->counter_dev) {
+
+			for (i = 0; i < ni_660x_num_counters(dev); ++i)
+				if ((private(dev)->counter_dev->counters[i]) != NULL)
+					kfree (private(dev)->counter_dev->counters[i]);
+
+			a4l_ni_gpct_device_destroy(private(dev)->counter_dev);
+		}
+
+		if (private(dev)->mite) {
+			ni_660x_free_mite_rings(dev);
+			a4l_mite_unsetup(private(dev)->mite);
+		}
+	}
+
+	a4l_info(dev, "driver detached\n");
+
+	return 0;
+}
+
+static int ni_660x_GPCT_rinsn(struct a4l_subdevice *s, struct a4l_kernel_instruction *insn)
+{
+	return a4l_ni_tio_rinsn(subdev_priv->counter, insn);
+}
+
+static void init_tio_chip(struct a4l_device *dev, int chipset)
+{
+	unsigned int i;
+
+	/*  Init dma configuration register */
+	private(dev)->dma_configuration_soft_copies[chipset] = 0;
+	for (i = 0; i < MAX_DMA_CHANNEL; ++i) {
+		private(dev)->dma_configuration_soft_copies[chipset] |=
+		    dma_select_bits(i, dma_selection_none) & dma_select_mask(i);
+	}
+
+	ni_660x_write_register(dev, chipset,
+			       private(dev)->
+			       dma_configuration_soft_copies[chipset],
+			       DMAConfigRegister);
+
+	for (i = 0; i < NUM_PFI_CHANNELS; ++i)
+		ni_660x_write_register(dev, chipset, 0, IOConfigReg(i));
+}
+
+static int ni_660x_GPCT_insn_config(struct a4l_subdevice *s, struct a4l_kernel_instruction *insn)
+{
+	return a4l_ni_tio_insn_config (subdev_priv->counter, insn);
+}
+
+static int ni_660x_GPCT_winsn(struct a4l_subdevice *s, struct a4l_kernel_instruction *insn)
+{
+	return a4l_ni_tio_winsn(subdev_priv->counter, insn);
+}
+
+static int ni_660x_dio_insn_bits(struct a4l_subdevice *s, struct a4l_kernel_instruction *insn)
+{
+	unsigned int* data = (unsigned int*) insn->data;
+	unsigned int base_bitfield_channel = CR_CHAN(insn->chan_desc);
+
+	/*  Check if we have to write some bits */
+	if (data[0]) {
+		subdev_priv->state &= ~(data[0] << base_bitfield_channel);
+		subdev_priv->state |= (data[0] & data[1]) << base_bitfield_channel;
+		/* Write out the new digital output lines */
+		ni_660x_write_register(s->dev, 0, subdev_priv->state, DIO32Output);
+	}
+
+	/* On return, data[1] contains the value of the digital input
+	   and output lines. */
+	data[1] = ni_660x_read_register(s->dev, 0, DIO32Input) >>
+		base_bitfield_channel;
+
+	return 0;
+}
+
+static void ni_660x_select_pfi_output(struct a4l_device *dev,
+				      unsigned pfi_channel,
+				      unsigned output_select)
+{
+	static const unsigned counter_4_7_first_pfi = 8;
+	static const unsigned counter_4_7_last_pfi = 23;
+	unsigned active_chipset = 0;
+	unsigned idle_chipset = 0;
+	unsigned active_bits;
+	unsigned idle_bits;
+
+	if (board(dev)->n_chips > 1) {
+		if (output_select == pfi_output_select_counter &&
+		    pfi_channel >= counter_4_7_first_pfi &&
+		    pfi_channel <= counter_4_7_last_pfi) {
+			active_chipset = 1;
+			idle_chipset = 0;
+		} else {
+			active_chipset = 0;
+			idle_chipset = 1;
+		}
+	}
+
+	if (idle_chipset != active_chipset) {
+
+		idle_bits = ni_660x_read_register(dev, idle_chipset,
+						  IOConfigReg(pfi_channel));
+		idle_bits &= ~pfi_output_select_mask(pfi_channel);
+		idle_bits |=
+		    pfi_output_select_bits(pfi_channel,
+					   pfi_output_select_high_Z);
+		ni_660x_write_register(dev, idle_chipset, idle_bits,
+				       IOConfigReg(pfi_channel));
+	}
+
+	active_bits =
+	    ni_660x_read_register(dev, active_chipset,
+				  IOConfigReg(pfi_channel));
+	active_bits &= ~pfi_output_select_mask(pfi_channel);
+	active_bits |= pfi_output_select_bits(pfi_channel, output_select);
+	ni_660x_write_register(dev, active_chipset, active_bits,
+			       IOConfigReg(pfi_channel));
+}
+
+static int ni_660x_set_pfi_routing(struct a4l_device *dev, unsigned chan,
+				   unsigned source)
+{
+	BUG_ON(chan >= NUM_PFI_CHANNELS);
+
+	if (source > num_pfi_output_selects)
+		return -EINVAL;
+	if (source == pfi_output_select_high_Z)
+		return -EINVAL;
+	if (chan < min_counter_pfi_chan) {
+		if (source == pfi_output_select_counter)
+			return -EINVAL;
+	} else if (chan > max_dio_pfi_chan) {
+		if (source == pfi_output_select_do)
+			return -EINVAL;
+	}
+	BUG_ON(chan >= NUM_PFI_CHANNELS);
+
+	private(dev)->pfi_output_selects[chan] = source;
+	if (private(dev)->pfi_direction_bits & (((uint64_t) 1) << chan))
+		ni_660x_select_pfi_output(dev, chan,
+					  private(dev)->
+					  pfi_output_selects[chan]);
+	return 0;
+}
+
+static unsigned ni_660x_get_pfi_routing(struct a4l_device *dev,
+					unsigned chan)
+{
+	BUG_ON(chan >= NUM_PFI_CHANNELS);
+	return private(dev)->pfi_output_selects[chan];
+}
+
+static void ni660x_config_filter(struct a4l_device *dev,
+				 unsigned pfi_channel,
+				 int filter)
+{
+	unsigned int bits;
+
+	bits = ni_660x_read_register(dev, 0, IOConfigReg(pfi_channel));
+	bits &= ~pfi_input_select_mask(pfi_channel);
+	bits |= pfi_input_select_bits(pfi_channel, filter);
+	ni_660x_write_register(dev, 0, bits, IOConfigReg(pfi_channel));
+}
+
+static int ni_660x_dio_insn_config(struct a4l_subdevice *s, struct a4l_kernel_instruction *insn)
+{
+	unsigned int* data = insn->data;
+	int chan = CR_CHAN(insn->chan_desc);
+	struct a4l_device* dev = s->dev;
+
+	if (data == NULL)
+		return -EINVAL;
+
+	/* The input or output configuration of each digital line is
+	 * configured by a special insn_config instruction: chan_desc
+	 * selects the channel to be changed, and data[0] holds the
+	 * configuration request (A4L_INSN_CONFIG_DIO_INPUT,
+	 * A4L_INSN_CONFIG_DIO_OUTPUT, ...). */
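+	/*
+	 * For instance, data[0] == A4L_INSN_CONFIG_DIO_OUTPUT on channel
+	 * `chan` drives the PFI line with its currently routed output,
+	 * while A4L_INSN_CONFIG_DIO_QUERY reports the current direction
+	 * back in data[1] (A4L_OUTPUT or A4L_INPUT).
+	 */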
+
+	switch (data[0]) {
+	case A4L_INSN_CONFIG_DIO_OUTPUT:
+		private(dev)->pfi_direction_bits |= ((uint64_t) 1) << chan;
+		ni_660x_select_pfi_output(dev, chan,
+					  private(dev)->
+					  pfi_output_selects[chan]);
+		break;
+	case A4L_INSN_CONFIG_DIO_INPUT:
+		private(dev)->pfi_direction_bits &= ~(((uint64_t) 1) << chan);
+		ni_660x_select_pfi_output(dev, chan, pfi_output_select_high_Z);
+		break;
+	case A4L_INSN_CONFIG_DIO_QUERY:
+		data[1] =
+		    (private(dev)->pfi_direction_bits &
+		     (((uint64_t) 1) << chan)) ? A4L_OUTPUT : A4L_INPUT;
+		return 0;
+	case A4L_INSN_CONFIG_SET_ROUTING:
+		return ni_660x_set_pfi_routing(dev, chan, data[1]);
+		break;
+	case A4L_INSN_CONFIG_GET_ROUTING:
+		data[1] = ni_660x_get_pfi_routing(dev, chan);
+		break;
+	case A4L_INSN_CONFIG_FILTER:
+		ni660x_config_filter(dev, chan, data[1]);
+		break;
+	default:
+		return -EINVAL;
+		break;
+	};
+
+	return 0;
+}
+
+
+MODULE_DESCRIPTION("Analogy driver for NI660x series cards");
+MODULE_LICENSE("GPL");
+++ linux-patched/drivers/xenomai/analogy/national_instruments/ni_stc.h	2022-03-21 12:58:31.100872277 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/drivers/xenomai/analogy/national_instruments/Makefile	1970-01-01 01:00:00.000000000 +0100
+/*
+ * Register descriptions for NI DAQ-STC chip
+ *
+ * Copyright (C) 1998-9 David A. Schleef <ds@schleef.org>
+ *
+ * This code is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published
+ * by the Free Software Foundation; either version 2 of the License,
+ * or (at your option) any later version.
+ *
+ * This code is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this code; if not, write to the Free Software Foundation,
+ * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ * References:
+ * 340934b.pdf  DAQ-STC reference manual
+ *
+ */
+#ifndef __ANALOGY_NI_STC_H__
+#define __ANALOGY_NI_STC_H__
+
+#include "ni_tio.h"
+
+#define _bit15		0x8000
+#define _bit14		0x4000
+#define _bit13		0x2000
+#define _bit12		0x1000
+#define _bit11		0x0800
+#define _bit10		0x0400
+#define _bit9		0x0200
+#define _bit8		0x0100
+#define _bit7		0x0080
+#define _bit6		0x0040
+#define _bit5		0x0020
+#define _bit4		0x0010
+#define _bit3		0x0008
+#define _bit2		0x0004
+#define _bit1		0x0002
+#define _bit0		0x0001
+
+#define NUM_PFI_OUTPUT_SELECT_REGS 6
+
+/* Registers in the National Instruments DAQ-STC chip */
+
+#define Interrupt_A_Ack_Register	2
+#define G0_Gate_Interrupt_Ack			_bit15
+#define G0_TC_Interrupt_Ack			_bit14
+#define AI_Error_Interrupt_Ack			_bit13
+#define AI_STOP_Interrupt_Ack			_bit12
+#define AI_START_Interrupt_Ack			_bit11
+#define AI_START2_Interrupt_Ack			_bit10
+#define AI_START1_Interrupt_Ack			_bit9
+#define AI_SC_TC_Interrupt_Ack			_bit8
+#define AI_SC_TC_Error_Confirm			_bit7
+#define G0_TC_Error_Confirm			_bit6
+#define G0_Gate_Error_Confirm			_bit5
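+
+/* Example: acknowledging both G0 interrupts at once means writing
+   G0_Gate_Interrupt_Ack | G0_TC_Interrupt_Ack, i.e. 0xc000, to
+   Interrupt_A_Ack_Register (STC register 2). */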
+
+#define AI_Status_1_Register		2
+#define Interrupt_A_St				_bit15
+#define AI_FIFO_Full_St				_bit14
+#define AI_FIFO_Half_Full_St			_bit13
+#define AI_FIFO_Empty_St			_bit12
+#define AI_Overrun_St				_bit11
+#define AI_Overflow_St				_bit10
+#define AI_SC_TC_Error_St			_bit9
+#define AI_START2_St				_bit8
+#define AI_START1_St				_bit7
+#define AI_SC_TC_St				_bit6
+#define AI_START_St				_bit5
+#define AI_STOP_St				_bit4
+#define G0_TC_St				_bit3
+#define G0_Gate_Interrupt_St			_bit2
+#define AI_FIFO_Request_St			_bit1
+#define Pass_Thru_0_Interrupt_St		_bit0
+
+#define AI_Status_2_Register		5
+
+#define Interrupt_B_Ack_Register	3
+#define G1_Gate_Error_Confirm			_bit1
+#define G1_TC_Error_Confirm			_bit2
+#define AO_BC_TC_Trigger_Error_Confirm		_bit3
+#define AO_BC_TC_Error_Confirm			_bit4
+#define AO_UI2_TC_Error_Confrim			_bit5
+#define AO_UI2_TC_Interrupt_Ack			_bit6
+#define AO_UC_TC_Interrupt_Ack			_bit7
+#define AO_BC_TC_Interrupt_Ack			_bit8
+#define AO_START1_Interrupt_Ack			_bit9
+#define AO_UPDATE_Interrupt_Ack			_bit10
+#define AO_START_Interrupt_Ack			_bit11
+#define AO_STOP_Interrupt_Ack			_bit12
+#define AO_Error_Interrupt_Ack			_bit13
+#define G1_TC_Interrupt_Ack			_bit14
+#define G1_Gate_Interrupt_Ack			_bit15
+
+#define AO_Status_1_Register		3
+#define Interrupt_B_St				_bit15
+#define AO_FIFO_Full_St				_bit14
+#define AO_FIFO_Half_Full_St			_bit13
+#define AO_FIFO_Empty_St			_bit12
+#define AO_BC_TC_Error_St			_bit11
+#define AO_START_St				_bit10
+#define AO_Overrun_St				_bit9
+#define AO_START1_St				_bit8
+#define AO_BC_TC_St				_bit7
+#define AO_UC_TC_St				_bit6
+#define AO_UPDATE_St				_bit5
+#define AO_UI2_TC_St				_bit4
+#define G1_TC_St				_bit3
+#define G1_Gate_Interrupt_St			_bit2
+#define AO_FIFO_Request_St			_bit1
+#define Pass_Thru_1_Interrupt_St		_bit0
+
+
+#define AI_Command_2_Register		4
+#define AI_End_On_SC_TC				_bit15
+#define AI_End_On_End_Of_Scan			_bit14
+#define AI_START1_Disable			_bit11
+#define AI_SC_Save_Trace			_bit10
+#define AI_SI_Switch_Load_On_SC_TC		_bit9
+#define AI_SI_Switch_Load_On_STOP		_bit8
+#define AI_SI_Switch_Load_On_TC			_bit7
+#define AI_SC_Switch_Load_On_TC			_bit4
+#define AI_STOP_Pulse				_bit3
+#define AI_START_Pulse				_bit2
+#define AI_START2_Pulse				_bit1
+#define AI_START1_Pulse				_bit0
+
+#define AO_Command_2_Register		5
+#define AO_End_On_BC_TC(x)			(((x) & 0x3) << 14)
+#define AO_Start_Stop_Gate_Enable		_bit13
+#define AO_UC_Save_Trace			_bit12
+#define AO_BC_Gate_Enable			_bit11
+#define AO_BC_Save_Trace			_bit10
+#define AO_UI_Switch_Load_On_BC_TC		_bit9
+#define AO_UI_Switch_Load_On_Stop		_bit8
+#define AO_UI_Switch_Load_On_TC			_bit7
+#define AO_UC_Switch_Load_On_BC_TC		_bit6
+#define AO_UC_Switch_Load_On_TC			_bit5
+#define AO_BC_Switch_Load_On_TC			_bit4
+#define AO_Mute_B				_bit3
+#define AO_Mute_A				_bit2
+#define AO_UPDATE2_Pulse			_bit1
+#define AO_START1_Pulse				_bit0
+
+#define AO_Status_2_Register		6
+
+#define DIO_Parallel_Input_Register	7
+
+#define AI_Command_1_Register		8
+#define AI_Analog_Trigger_Reset			_bit14
+#define AI_Disarm				_bit13
+#define AI_SI2_Arm				_bit12
+#define AI_SI2_Load				_bit11
+#define AI_SI_Arm				_bit10
+#define AI_SI_Load				_bit9
+#define AI_DIV_Arm				_bit8
+#define AI_DIV_Load				_bit7
+#define AI_SC_Arm				_bit6
+#define AI_SC_Load				_bit5
+#define AI_SCAN_IN_PROG_Pulse			_bit4
+#define AI_EXTMUX_CLK_Pulse			_bit3
+#define AI_LOCALMUX_CLK_Pulse			_bit2
+#define AI_SC_TC_Pulse				_bit1
+#define AI_CONVERT_Pulse			_bit0
+
+#define AO_Command_1_Register		9
+#define AO_Analog_Trigger_Reset			_bit15
+#define AO_START_Pulse				_bit14
+#define AO_Disarm				_bit13
+#define AO_UI2_Arm_Disarm			_bit12
+#define AO_UI2_Load				_bit11
+#define AO_UI_Arm				_bit10
+#define AO_UI_Load				_bit9
+#define AO_UC_Arm				_bit8
+#define AO_UC_Load				_bit7
+#define AO_BC_Arm				_bit6
+#define AO_BC_Load				_bit5
+#define AO_DAC1_Update_Mode			_bit4
+#define AO_LDAC1_Source_Select			_bit3
+#define AO_DAC0_Update_Mode			_bit2
+#define AO_LDAC0_Source_Select			_bit1
+#define AO_UPDATE_Pulse				_bit0
+
+
+#define DIO_Output_Register		10
+#define DIO_Parallel_Data_Out(a)                ((a)&0xff)
+#define DIO_Parallel_Data_Mask                  0xff
+#define DIO_SDOUT                               _bit0
+#define DIO_SDIN                                _bit4
+#define DIO_Serial_Data_Out(a)                  (((a)&0xff)<<8)
+#define DIO_Serial_Data_Mask                    0xff00
+
+#define DIO_Control_Register		11
+#define DIO_Software_Serial_Control             _bit11
+#define DIO_HW_Serial_Timebase                  _bit10
+#define DIO_HW_Serial_Enable                    _bit9
+#define DIO_HW_Serial_Start                     _bit8
+#define DIO_Pins_Dir(a)                         ((a)&0xff)
+#define DIO_Pins_Dir_Mask                       0xff
+
+#define AI_Mode_1_Register		12
+#define AI_CONVERT_Source_Select(a)		(((a) & 0x1f) << 11)
+#define AI_SI_Source_select(a)			(((a) & 0x1f) << 6)
+#define AI_CONVERT_Source_Polarity		_bit5
+#define AI_SI_Source_Polarity		_bit4
+#define AI_Start_Stop				_bit3
+#define AI_Mode_1_Reserved			_bit2
+#define AI_Continuous				_bit1
+#define AI_Trigger_Once				_bit0
+
+#define AI_Mode_2_Register		13
+#define AI_SC_Gate_Enable			_bit15
+#define AI_Start_Stop_Gate_Enable		_bit14
+#define AI_Pre_Trigger				_bit13
+#define AI_External_MUX_Present			_bit12
+#define AI_SI2_Initial_Load_Source		_bit9
+#define AI_SI2_Reload_Mode			_bit8
+#define AI_SI_Initial_Load_Source		_bit7
+#define AI_SI_Reload_Mode(a)			(((a) & 0x7)<<4)
+#define AI_SI_Write_Switch			_bit3
+#define AI_SC_Initial_Load_Source		_bit2
+#define AI_SC_Reload_Mode			_bit1
+#define AI_SC_Write_Switch			_bit0
+
+#define AI_SI_Load_A_Registers		14
+#define AI_SI_Load_B_Registers		16
+#define AI_SC_Load_A_Registers		18
+#define AI_SC_Load_B_Registers		20
+#define AI_SI_Save_Registers		64
+#define AI_SC_Save_Registers		66
+
+#define AI_SI2_Load_A_Register		23
+#define AI_SI2_Load_B_Register		25
+
+#define Joint_Status_1_Register         27
+#define DIO_Serial_IO_In_Progress_St            _bit12
+
+#define DIO_Serial_Input_Register       28
+#define Joint_Status_2_Register         29
+#define AO_TMRDACWRs_In_Progress_St		_bit5
+
+#define AO_Mode_1_Register		38
+#define AO_UPDATE_Source_Select(x)		(((x)&0x1f)<<11)
+#define AO_UI_Source_Select(x)			(((x)&0x1f)<<6)
+#define AO_Multiple_Channels			_bit5
+#define AO_UPDATE_Source_Polarity		_bit4
+#define AO_UI_Source_Polarity			_bit3
+#define AO_UC_Switch_Load_Every_TC		_bit2
+#define AO_Continuous				_bit1
+#define AO_Trigger_Once				_bit0
+
+#define AO_Mode_2_Register		39
+#define AO_FIFO_Mode_Mask			( 0x3 << 14 )
+#define AO_FIFO_Mode_HF_to_F			(3<<14)
+#define AO_FIFO_Mode_F				(2<<14)
+#define AO_FIFO_Mode_HF				(1<<14)
+#define AO_FIFO_Mode_E				(0<<14)
+#define AO_FIFO_Retransmit_Enable		_bit13
+#define AO_START1_Disable			_bit12
+#define AO_UC_Initial_Load_Source		_bit11
+#define AO_UC_Write_Switch			_bit10
+#define AO_UI2_Initial_Load_Source		_bit9
+#define AO_UI2_Reload_Mode			_bit8
+#define AO_UI_Initial_Load_Source		_bit7
+#define AO_UI_Reload_Mode(x)			(((x) & 0x7) << 4)
+#define AO_UI_Write_Switch			_bit3
+#define AO_BC_Initial_Load_Source		_bit2
+#define AO_BC_Reload_Mode			_bit1
+#define AO_BC_Write_Switch			_bit0
+
+#define AO_UI_Load_A_Register		40
+#define AO_UI_Load_A_Register_High	40
+#define AO_UI_Load_A_Register_Low	41
+#define AO_UI_Load_B_Register		42
+#define AO_UI_Save_Registers		16
+#define AO_BC_Load_A_Register		44
+#define AO_BC_Load_A_Register_High	44
+#define AO_BC_Load_A_Register_Low	45
+#define AO_BC_Load_B_Register		46
+#define AO_BC_Load_B_Register_High	46
+#define AO_BC_Load_B_Register_Low	47
+#define AO_BC_Save_Registers		18
+#define AO_UC_Load_A_Register		48
+#define AO_UC_Load_A_Register_High	48
+#define AO_UC_Load_A_Register_Low	49
+#define AO_UC_Load_B_Register		50
+#define AO_UC_Save_Registers		20
+
+#define Clock_and_FOUT_Register		56
+#define FOUT_Enable				_bit15
+#define FOUT_Timebase_Select			_bit14
+#define DIO_Serial_Out_Divide_By_2		_bit13
+#define Slow_Internal_Time_Divide_By_2		_bit12
+#define Slow_Internal_Timebase			_bit11
+#define G_Source_Divide_By_2			_bit10
+#define Clock_To_Board_Divide_By_2		_bit9
+#define Clock_To_Board				_bit8
+#define AI_Output_Divide_By_2			_bit7
+#define AI_Source_Divide_By_2			_bit6
+#define AO_Output_Divide_By_2			_bit5
+#define AO_Source_Divide_By_2			_bit4
+#define FOUT_Divider_mask			0xf
+#define FOUT_Divider(x)				(((x) & 0xf) << 0)
+
+#define IO_Bidirection_Pin_Register	57
+#define	RTSI_Trig_Direction_Register	58
+#define	Drive_RTSI_Clock_Bit			0x1
+#define	Use_RTSI_Clock_Bit			0x2
+
+static inline unsigned int RTSI_Output_Bit(unsigned channel, int is_mseries)
+{
+	unsigned max_channel;
+	unsigned base_bit_shift;
+	if(is_mseries)
+	{
+		base_bit_shift = 8;
+		max_channel = 7;
+	}else
+	{
+		base_bit_shift = 9;
+		max_channel = 6;
+	}
+	if(channel > max_channel)
+	{
+		rtdm_printk("%s: bug, invalid RTSI_channel=%i\n",
+			    __FUNCTION__, channel);
+		return 0;
+	}
+	return 1 << (base_bit_shift + channel);
+}
+
+#define Interrupt_Control_Register	59
+#define Interrupt_B_Enable			_bit15
+#define Interrupt_B_Output_Select(x)		((x)<<12)
+#define Interrupt_A_Enable			_bit11
+#define Interrupt_A_Output_Select(x)		((x)<<8)
+#define Pass_Thru_0_Interrupt_Polarity		_bit3
+#define Pass_Thru_1_Interrupt_Polarity		_bit2
+#define Interrupt_Output_On_3_Pins		_bit1
+#define Interrupt_Output_Polarity		_bit0
+
+#define AI_Output_Control_Register	60
+#define AI_START_Output_Select			_bit10
+#define AI_SCAN_IN_PROG_Output_Select(x)	(((x) & 0x3) << 8)
+#define AI_EXTMUX_CLK_Output_Select(x)		(((x) & 0x3) << 6)
+#define AI_LOCALMUX_CLK_Output_Select(x)	((x)<<4)
+#define AI_SC_TC_Output_Select(x)		((x)<<2)
+#define AI_CONVERT_Output_High_Z		0
+#define AI_CONVERT_Output_Ground		1
+#define AI_CONVERT_Output_Enable_Low		2
+#define AI_CONVERT_Output_Enable_High		3
+#define AI_CONVERT_Output_Select(x)		((x) & 0x3)
+
+#define AI_START_STOP_Select_Register	62
+#define AI_START_Polarity			_bit15
+#define AI_STOP_Polarity			_bit14
+#define AI_STOP_Sync				_bit13
+#define AI_STOP_Edge				_bit12
+#define AI_STOP_Select(a)			(((a) & 0x1f)<<7)
+#define AI_START_Sync				_bit6
+#define AI_START_Edge				_bit5
+#define AI_START_Select(a)			((a) & 0x1f)
+
+#define AI_Trigger_Select_Register	63
+#define AI_START1_Polarity			_bit15
+#define AI_START2_Polarity			_bit14
+#define AI_START2_Sync				_bit13
+#define AI_START2_Edge				_bit12
+#define AI_START2_Select(a)			(((a) & 0x1f) << 7)
+#define AI_START1_Sync				_bit6
+#define AI_START1_Edge				_bit5
+#define AI_START1_Select(a)			((a) & 0x1f)
+
+#define AI_DIV_Load_A_Register	64
+
+#define AO_Start_Select_Register	66
+#define AO_UI2_Software_Gate			_bit15
+#define AO_UI2_External_Gate_Polarity		_bit14
+#define AO_START_Polarity			_bit13
+#define AO_AOFREQ_Enable			_bit12
+#define AO_UI2_External_Gate_Select(a)		(((a) & 0x1f) << 7)
+#define AO_START_Sync				_bit6
+#define AO_START_Edge				_bit5
+#define AO_START_Select(a)			((a) & 0x1f)
+
+#define AO_Trigger_Select_Register	67
+#define AO_UI2_External_Gate_Enable		_bit15
+#define AO_Delayed_START1			_bit14
+#define AO_START1_Polarity			_bit13
+#define AO_UI2_Source_Polarity			_bit12
+#define AO_UI2_Source_Select(x)			(((x)&0x1f)<<7)
+#define AO_START1_Sync				_bit6
+#define AO_START1_Edge				_bit5
+#define AO_START1_Select(x)			(((x)&0x1f)<<0)
+
+#define AO_Mode_3_Register		70
+#define AO_UI2_Switch_Load_Next_TC		_bit13
+#define AO_UC_Switch_Load_Every_BC_TC		_bit12
+#define AO_Trigger_Length			_bit11
+#define AO_Stop_On_Overrun_Error		_bit5
+#define AO_Stop_On_BC_TC_Trigger_Error		_bit4
+#define AO_Stop_On_BC_TC_Error			_bit3
+#define AO_Not_An_UPDATE			_bit2
+#define AO_Software_Gate			_bit1
+#define AO_Last_Gate_Disable			_bit0	/* M Series only */
+
+#define Joint_Reset_Register		72
+#define Software_Reset				_bit11
+#define AO_Configuration_End			_bit9
+#define AI_Configuration_End			_bit8
+#define AO_Configuration_Start			_bit5
+#define AI_Configuration_Start			_bit4
+#define G1_Reset				_bit3
+#define G0_Reset				_bit2
+#define AO_Reset				_bit1
+#define AI_Reset				_bit0
+
+#define Interrupt_A_Enable_Register	73
+#define Pass_Thru_0_Interrupt_Enable		_bit9
+#define G0_Gate_Interrupt_Enable		_bit8
+#define AI_FIFO_Interrupt_Enable		_bit7
+#define G0_TC_Interrupt_Enable			_bit6
+#define AI_Error_Interrupt_Enable		_bit5
+#define AI_STOP_Interrupt_Enable		_bit4
+#define AI_START_Interrupt_Enable		_bit3
+#define AI_START2_Interrupt_Enable		_bit2
+#define AI_START1_Interrupt_Enable		_bit1
+#define AI_SC_TC_Interrupt_Enable		_bit0
+
+#define Interrupt_B_Enable_Register	75
+#define Pass_Thru_1_Interrupt_Enable		_bit11
+#define G1_Gate_Interrupt_Enable		_bit10
+#define G1_TC_Interrupt_Enable			_bit9
+#define AO_FIFO_Interrupt_Enable		_bit8
+#define AO_UI2_TC_Interrupt_Enable		_bit7
+#define AO_UC_TC_Interrupt_Enable		_bit6
+#define AO_Error_Interrupt_Enable		_bit5
+#define AO_STOP_Interrupt_Enable		_bit4
+#define AO_START_Interrupt_Enable		_bit3
+#define AO_UPDATE_Interrupt_Enable		_bit2
+#define AO_START1_Interrupt_Enable		_bit1
+#define AO_BC_TC_Interrupt_Enable		_bit0
+
+#define Second_IRQ_A_Enable_Register	74
+#define AI_SC_TC_Second_Irq_Enable		_bit0
+#define AI_START1_Second_Irq_Enable		_bit1
+#define AI_START2_Second_Irq_Enable		_bit2
+#define AI_START_Second_Irq_Enable		_bit3
+#define AI_STOP_Second_Irq_Enable		_bit4
+#define AI_Error_Second_Irq_Enable		_bit5
+#define G0_TC_Second_Irq_Enable			_bit6
+#define AI_FIFO_Second_Irq_Enable		_bit7
+#define G0_Gate_Second_Irq_Enable		_bit8
+#define Pass_Thru_0_Second_Irq_Enable		_bit9
+
+#define Second_IRQ_B_Enable_Register	76
+#define AO_BC_TC_Second_Irq_Enable		_bit0
+#define AO_START1_Second_Irq_Enable		_bit1
+#define AO_UPDATE_Second_Irq_Enable		_bit2
+#define AO_START_Second_Irq_Enable		_bit3
+#define AO_STOP_Second_Irq_Enable		_bit4
+#define AO_Error_Second_Irq_Enable		_bit5
+#define AO_UC_TC_Second_Irq_Enable		_bit6
+#define AO_UI2_TC_Second_Irq_Enable		_bit7
+#define AO_FIFO_Second_Irq_Enable		_bit8
+#define G1_TC_Second_Irq_Enable			_bit9
+#define G1_Gate_Second_Irq_Enable		_bit10
+#define Pass_Thru_1_Second_Irq_Enable		_bit11
+
+#define AI_Personal_Register		77
+#define AI_SHIFTIN_Pulse_Width			_bit15
+#define AI_EOC_Polarity				_bit14
+#define AI_SOC_Polarity				_bit13
+#define AI_SHIFTIN_Polarity			_bit12
+#define AI_CONVERT_Pulse_Timebase		_bit11
+#define AI_CONVERT_Pulse_Width			_bit10
+#define AI_CONVERT_Original_Pulse		_bit9
+#define AI_FIFO_Flags_Polarity			_bit8
+#define AI_Overrun_Mode				_bit7
+#define AI_EXTMUX_CLK_Pulse_Width		_bit6
+#define AI_LOCALMUX_CLK_Pulse_Width		_bit5
+#define AI_AIFREQ_Polarity			_bit4
+
+#define AO_Personal_Register		78
+#define AO_Interval_Buffer_Mode			_bit3
+#define AO_BC_Source_Select			_bit4
+#define AO_UPDATE_Pulse_Width			_bit5
+#define AO_UPDATE_Pulse_Timebase		_bit6
+#define AO_UPDATE_Original_Pulse		_bit7
+#define AO_DMA_PIO_Control			_bit8 /* M Series: reserved */
+#define AO_AOFREQ_Polarity			_bit9 /* M Series: reserved */
+#define AO_FIFO_Enable				_bit10
+#define AO_FIFO_Flags_Polarity			_bit11 /* M Series: reserved */
+#define AO_TMRDACWR_Pulse_Width			_bit12
+#define AO_Fast_CPU				_bit13 /* M Series: reserved */
+#define AO_Number_Of_DAC_Packages		_bit14 /* 1 for "single" mode,
+							  0 for "dual" */
+#define AO_Multiple_DACS_Per_Package		_bit15 /* M Series only */
+
+#define	RTSI_Trig_A_Output_Register	79
+
+#define	RTSI_Trig_B_Output_Register	80
+#define RTSI_Sub_Selection_1_Bit		_bit15 /* not for M Series */
+#define RTSI_Trig_Output_Bits(x, y)		((y & 0xf) << ((x % 4) * 4))
+#define RTSI_Trig_Output_Mask(x)		(0xf << ((x % 4) * 4))
+#define RTSI_Trig_Output_Source(x, y)		((y >> ((x % 4) * 4)) & 0xf)
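+
+/*
+ * Usage sketch (illustrative only, not part of the original driver): each
+ * RTSI line gets a 4-bit source select, lines 0-3 in
+ * RTSI_Trig_A_Output_Register and the remaining lines in
+ * RTSI_Trig_B_Output_Register.  Routing a source onto line `chan' (chan < 4
+ * here) would follow the usual read-modify-write pattern on the software
+ * copy kept in the private data:
+ *
+ *	devpriv->rtsi_trig_a_output_reg &= ~RTSI_Trig_Output_Mask(chan);
+ *	devpriv->rtsi_trig_a_output_reg |= RTSI_Trig_Output_Bits(chan, source);
+ *	devpriv->stc_writew(dev, devpriv->rtsi_trig_a_output_reg,
+ *			    RTSI_Trig_A_Output_Register);
+ *
+ * devpriv, rtsi_trig_a_output_reg and stc_writew() are all defined further
+ * down in this header.
+ */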
+
+#define	RTSI_Board_Register		81
+#define Write_Strobe_0_Register		82
+#define Write_Strobe_1_Register		83
+#define Write_Strobe_2_Register		84
+#define Write_Strobe_3_Register		85
+
+#define AO_Output_Control_Register	86
+#define AO_External_Gate_Enable			_bit15
+#define AO_External_Gate_Select(x)		(((x)&0x1f)<<10)
+#define AO_Number_Of_Channels(x)		(((x)&0xf)<<6)
+#define AO_UPDATE2_Output_Select(x)		(((x)&0x3)<<4)
+#define AO_External_Gate_Polarity		_bit3
+#define AO_UPDATE2_Output_Toggle		_bit2
+#define AO_Update_Output_High_Z			0
+#define AO_Update_Output_Ground			1
+#define AO_Update_Output_Enable_Low		2
+#define AO_Update_Output_Enable_High		3
+#define AO_UPDATE_Output_Select(x)		(x&0x3)
+
+#define AI_Mode_3_Register		87
+#define AI_Trigger_Length			_bit15
+#define AI_Delay_START				_bit14
+#define AI_Software_Gate			_bit13
+#define AI_SI_Special_Trigger_Delay		_bit12
+#define AI_SI2_Source_Select			_bit11
+#define AI_Delayed_START2			_bit10
+#define AI_Delayed_START1			_bit9
+#define AI_External_Gate_Mode			_bit8
+#define AI_FIFO_Mode_HF_to_E			(3<<6)
+#define AI_FIFO_Mode_F				(2<<6)
+#define AI_FIFO_Mode_HF				(1<<6)
+#define AI_FIFO_Mode_NE				(0<<6)
+#define AI_External_Gate_Polarity		_bit5
+#define AI_External_Gate_Select(a)		((a) & 0x1f)
+
+#define G_Autoincrement_Register(a)	(68+(a))
+#define G_Command_Register(a)		(6+(a))
+#define G_HW_Save_Register(a)		(8+(a)*2)
+#define G_HW_Save_Register_High(a)	(8+(a)*2)
+#define G_HW_Save_Register_Low(a)	(9+(a)*2)
+#define G_Input_Select_Register(a)	(36+(a))
+#define G_Load_A_Register(a)		(28+(a)*4)
+#define G_Load_A_Register_High(a)	(28+(a)*4)
+#define G_Load_A_Register_Low(a)	(29+(a)*4)
+#define G_Load_B_Register(a)		(30+(a)*4)
+#define G_Load_B_Register_High(a)	(30+(a)*4)
+#define G_Load_B_Register_Low(a)	(31+(a)*4)
+#define G_Mode_Register(a)		(26+(a))
+#define G_Save_Register(a)		(12+(a)*2)
+#define G_Save_Register_High(a)		(12+(a)*2)
+#define G_Save_Register_Low(a)		(13+(a)*2)
+#define G_Status_Register		4
+#define Analog_Trigger_Etc_Register	61
+
+/* command register */
+#define G_Disarm_Copy			_bit15		/* strobe */
+#define G_Save_Trace_Copy		_bit14
+#define G_Arm_Copy			_bit13		/* strobe */
+#define G_Bank_Switch_Start		_bit10		/* strobe */
+#define G_Little_Big_Endian		_bit9
+#define G_Synchronized_Gate		_bit8
+#define G_Write_Switch			_bit7
+#define G_Up_Down(a)			(((a)&0x03)<<5)
+#define G_Disarm			_bit4		/* strobe */
+#define G_Analog_Trigger_Reset		_bit3		/* strobe */
+#define G_Save_Trace			_bit1
+#define G_Arm				_bit0		/* strobe */
+
+/* channel agnostic names for the command register #defines */
+#define G_Bank_Switch_Enable		_bit12
+#define G_Bank_Switch_Mode		_bit11
+#define G_Load				_bit2		/* strobe */
+
+/* input select register */
+#define G_Gate_Select(a)		(((a)&0x1f)<<7)
+#define G_Source_Select(a)		(((a)&0x1f)<<2)
+#define G_Write_Acknowledges_Irq	_bit1
+#define G_Read_Acknowledges_Irq		_bit0
+
+/* same input select register, but with channel agnostic names */
+#define G_Source_Polarity		_bit15
+#define G_Output_Polarity		_bit14
+#define G_OR_Gate			_bit13
+#define G_Gate_Select_Load_Source	_bit12
+
+/* mode register */
+#define G_Loading_On_TC			_bit12
+#define G_Output_Mode(a)		(((a)&0x03)<<8)
+#define G_Trigger_Mode_For_Edge_Gate(a)	(((a)&0x03)<<3)
+#define G_Gating_Mode(a)		(((a)&0x03)<<0)
+
+/* same input mode register, but with channel agnostic names */
+#define G_Load_Source_Select		_bit7
+#define G_Reload_Source_Switching	_bit15
+#define G_Loading_On_Gate		_bit14
+#define G_Gate_Polarity		_bit13
+
+#define G_Counting_Once(a)		(((a)&0x03)<<10)
+#define G_Stop_Mode(a)			(((a)&0x03)<<5)
+#define G_Gate_On_Both_Edges		_bit2
+
+/* G_Status_Register */
+#define G1_Gate_Error_St		_bit15
+#define G0_Gate_Error_St		_bit14
+#define G1_TC_Error_St			_bit13
+#define G0_TC_Error_St			_bit12
+#define G1_No_Load_Between_Gates_St	_bit11
+#define G0_No_Load_Between_Gates_St	_bit10
+#define G1_Armed_St			_bit9
+#define G0_Armed_St			_bit8
+#define G1_Stale_Data_St		_bit7
+#define G0_Stale_Data_St		_bit6
+#define G1_Next_Load_Source_St		_bit5
+#define G0_Next_Load_Source_St		_bit4
+#define G1_Counting_St			_bit3
+#define G0_Counting_St			_bit2
+#define G1_Save_St			_bit1
+#define G0_Save_St			_bit0
+
+/* general purpose counter timer */
+#define G_Autoincrement(a)              ((a)<<0)
+
+/*Analog_Trigger_Etc_Register*/
+#define Analog_Trigger_Mode(x) ((x) & 0x7)
+#define Analog_Trigger_Enable _bit3
+#define Analog_Trigger_Drive _bit4
+#define GPFO_1_Output_Select		_bit7
+#define GPFO_0_Output_Select(a)		((a)<<11)
+#define GPFO_0_Output_Enable		_bit14
+#define GPFO_1_Output_Enable		_bit15
+
+/* Additional windowed registers unique to E series */
+
+/* 16 bit registers shadowed from DAQ-STC */
+#define Window_Address			0x00
+#define Window_Data			0x02
+
+#define Configuration_Memory_Clear	82
+#define ADC_FIFO_Clear			83
+#define DAC_FIFO_Clear			84
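+
+/*
+ * Usage sketch (illustrative only, not part of the original driver): on
+ * E series boards the DAQ-STC registers listed above are reached indirectly
+ * through this window pair -- the STC register number goes to
+ * Window_Address, the data then moves through Window_Data.  Assuming the
+ * ni_writew() helper and the window_lock defined later in this header, a
+ * windowed write would look roughly like:
+ *
+ *	unsigned long flags;
+ *
+ *	rtdm_lock_get_irqsave(&devpriv->window_lock, flags);
+ *	ni_writew(reg, Window_Address);
+ *	ni_writew(data, Window_Data);
+ *	rtdm_lock_put_irqrestore(&devpriv->window_lock, flags);
+ */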
+
+/* i/o port offsets */
+
+/* 8 bit registers */
+#define XXX_Status			0x01
+#define PROMOUT					_bit0
+#define AI_FIFO_LOWER_NOT_EMPTY			_bit3
+
+#define Serial_Command			0x0d
+#define Misc_Command			0x0f
+#define Port_A				0x19
+#define Port_B				0x1b
+#define Port_C				0x1d
+#define Configuration			0x1f
+#define Strobes				0x01
+#define Channel_A_Mode			0x03
+#define Channel_B_Mode			0x05
+#define Channel_C_Mode			0x07
+#define AI_AO_Select			0x09
+#define AI_DMA_Select_Shift		0
+#define AI_DMA_Select_Mask		0xf
+#define AO_DMA_Select_Shift		4
+#define AO_DMA_Select_Mask		(0xf << AO_DMA_Select_Shift)
+
+#define G0_G1_Select			0x0b
+
+static inline unsigned ni_stc_dma_channel_select_bitfield(unsigned channel)
+{
+	if(channel < 4) return 1 << channel;
+	if(channel == 4) return 0x3;
+	if(channel == 5) return 0x5;
+	BUG();
+	return 0;
+}
+static inline unsigned GPCT_DMA_Select_Bits(unsigned gpct_index, unsigned mite_channel)
+{
+	BUG_ON(gpct_index > 1);
+	return ni_stc_dma_channel_select_bitfield(mite_channel) << (4 * gpct_index);
+}
+static inline unsigned GPCT_DMA_Select_Mask(unsigned gpct_index)
+{
+	BUG_ON(gpct_index > 1);
+	return 0xf << (4 * gpct_index);
+}
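+
+/*
+ * Usage sketch (illustrative only): routing MITE DMA channel `mite_channel'
+ * to counter `gpct_index' through the G0_G1_Select byte register would be
+ * done on the software copy kept in the private data, e.g.:
+ *
+ *	devpriv->g0_g1_select_reg &= ~GPCT_DMA_Select_Mask(gpct_index);
+ *	devpriv->g0_g1_select_reg |=
+ *		GPCT_DMA_Select_Bits(gpct_index, mite_channel);
+ *	ni_writeb(devpriv->g0_g1_select_reg, G0_G1_Select);
+ *
+ * devpriv, g0_g1_select_reg and ni_writeb() are defined further down in this
+ * header; any locking around this sequence is omitted here.
+ */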
+
+/* 16 bit registers */
+
+#define Configuration_Memory_Low	0x10
+#define AI_DITHER				_bit9
+#define AI_LAST_CHANNEL				_bit15
+
+#define Configuration_Memory_High	0x12
+#define AI_AC_COUPLE				_bit11
+#define AI_DIFFERENTIAL				_bit12
+#define AI_COMMON				_bit13
+#define AI_GROUND				(_bit12|_bit13)
+#define AI_CONFIG_CHANNEL(x)			(x&0x3f)
+
+#define ADC_FIFO_Data_Register		0x1c
+
+#define AO_Configuration		0x16
+#define AO_Bipolar		_bit0
+#define AO_Deglitch		_bit1
+#define AO_Ext_Ref		_bit2
+#define AO_Ground_Ref		_bit3
+#define AO_Channel(x)		((x) << 8)
+
+#define DAC_FIFO_Data			0x1e
+#define DAC0_Direct_Data		0x18
+#define DAC1_Direct_Data		0x1a
+
+/* 611x registers (these boards differ from the e-series) */
+
+#define Magic_611x			0x19 /* w8 (new) */
+#define Calibration_Channel_Select_611x	0x1a /* w16 (new) */
+#define ADC_FIFO_Data_611x		0x1c /* r32 (incompatible) */
+#define AI_FIFO_Offset_Load_611x	0x05 /* r8 (new) */
+#define DAC_FIFO_Data_611x		0x14 /* w32 (incompatible) */
+#define Cal_Gain_Select_611x		0x05 /* w8 (new) */
+
+#define AO_Window_Address_611x		0x18
+#define AO_Window_Data_611x		0x1e
+
+/* 6143 registers */
+#define Magic_6143			0x19 /* w8 */
+#define G0G1_DMA_Select_6143		0x0B /* w8 */
+#define PipelineDelay_6143		0x1f /* w8 */
+#define EOC_Set_6143			0x1D /* w8 */
+#define AIDMA_Select_6143		0x09 /* w8 */
+#define AIFIFO_Data_6143		0x8C /* w32 */
+#define AIFIFO_Flag_6143		0x84 /* w32 */
+#define AIFIFO_Control_6143		0x88 /* w32 */
+#define AIFIFO_Status_6143		0x88 /* w32 */
+#define AIFIFO_DMAThreshold_6143	0x90 /* w32 */
+#define AIFIFO_Words_Available_6143	0x94 /* w32 */
+
+#define Calibration_Channel_6143	0x42 /* w16 */
+#define Calibration_LowTime_6143	0x20 /* w16 */
+#define Calibration_HighTime_6143	0x22 /* w16 */
+#define Relay_Counter_Load_Val__6143	0x4C /* w32 */
+#define Signature_6143			0x50 /* w32 */
+#define Release_Date_6143		0x54 /* w32 */
+#define Release_Oldest_Date_6143	0x58 /* w32 */
+
+#define Calibration_Channel_6143_RelayOn	0x8000	/* Calibration relay switch On */
+#define Calibration_Channel_6143_RelayOff	0x4000	/* Calibration relay switch Off */
+#define Calibration_Channel_Gnd_Gnd	0x00	/* Offset Calibration */
+#define Calibration_Channel_2v5_Gnd	0x02	/* 2.5V Reference */
+#define Calibration_Channel_Pwm_Gnd	0x05	/* +/- 5V Self Cal */
+#define Calibration_Channel_2v5_Pwm	0x0a	/* PWM Calibration */
+#define Calibration_Channel_Pwm_Pwm	0x0d	/* CMRR */
+#define Calibration_Channel_Gnd_Pwm	0x0e	/* PWM Calibration */
+
+/* 671x, 611x registers */
+
+/* 671xi 611x windowed ao registers */
+#define AO_Immediate_671x			0x11 /* W 16 */
+#define AO_Timed_611x				0x10 /* W 16 */
+#define AO_FIFO_Offset_Load_611x		0x13 /* W32 */
+#define AO_Later_Single_Point_Updates		0x14 /* W 16 */
+#define AO_Waveform_Generation_611x		0x15 /* W 16 */
+#define AO_Misc_611x				0x16 /* W 16 */
+#define AO_Calibration_Channel_Select_67xx	0x17 /* W 16 */
+#define AO_Configuration_2_67xx			0x18 /* W 16 */
+#define CAL_ADC_Command_67xx			0x19 /* W 8 */
+#define CAL_ADC_Status_67xx			0x1a /* R 8 */
+#define CAL_ADC_Data_67xx			0x1b /* R 16 */
+#define CAL_ADC_Config_Data_High_Word_67xx	0x1c /* RW 16 */
+#define CAL_ADC_Config_Data_Low_Word_67xx	0x1d /* RW 16 */
+
+static inline unsigned int DACx_Direct_Data_671x(int channel)
+{
+	return channel;
+}
+
+#define CLEAR_WG				_bit0
+
+#define CSCFG_CAL_CONTROL_MASK			0x7
+#define CSCFG_SELF_CAL_OFFSET			0x1
+#define CSCFG_SELF_CAL_GAIN			0x2
+#define CSCFG_SELF_CAL_OFFSET_GAIN		0x3
+#define CSCFG_SYSTEM_CAL_OFFSET			0x5
+#define CSCFG_SYSTEM_CAL_GAIN			0x6
+#define CSCFG_DONE				(1 << 3)
+#define CSCFG_POWER_SAVE_SELECT			(1 << 4)
+#define CSCFG_PORT_MODE				(1 << 5)
+#define CSCFG_RESET_VALID			(1 << 6)
+#define CSCFG_RESET				(1 << 7)
+#define CSCFG_UNIPOLAR				(1 << 12)
+#define CSCFG_WORD_RATE_2180_CYCLES		(0x0 << 13)
+#define CSCFG_WORD_RATE_1092_CYCLES		(0x1 << 13)
+#define CSCFG_WORD_RATE_532_CYCLES		(0x2 << 13)
+#define CSCFG_WORD_RATE_388_CYCLES		(0x3 << 13)
+#define CSCFG_WORD_RATE_324_CYCLES		(0x4 << 13)
+#define CSCFG_WORD_RATE_17444_CYCLES		(0x5 << 13)
+#define CSCFG_WORD_RATE_8724_CYCLES		(0x6 << 13)
+#define CSCFG_WORD_RATE_4364_CYCLES		(0x7 << 13)
+#define CSCFG_WORD_RATE_MASK			(0x7 << 13)
+#define CSCFG_LOW_POWER				(1 << 16)
+
+#define CS5529_CONFIG_DOUT(x)			(1 << (18 + x))
+#define CS5529_CONFIG_AOUT(x)			(1 << (22 + x))
+
+/* cs5529 command bits */
+#define CSCMD_POWER_SAVE			_bit0
+#define CSCMD_REGISTER_SELECT_MASK		0xe
+#define CSCMD_OFFSET_REGISTER			0x0
+#define CSCMD_GAIN_REGISTER			_bit1
+#define CSCMD_CONFIG_REGISTER			_bit2
+#define CSCMD_READ				_bit4
+#define CSCMD_CONTINUOUS_CONVERSIONS		_bit5
+#define CSCMD_SINGLE_CONVERSION			_bit6
+#define CSCMD_COMMAND				_bit7
+
+/* cs5529 status bits */
+#define CSS_ADC_BUSY				_bit0
+#define CSS_OSC_DETECT				_bit1 /* indicates adc error */
+#define CSS_OVERRANGE				_bit3
+
+#define SerDacLd(x)			(0x08<<(x))
+
+/*
+ * Definitions specific to the NI E series drivers, kept in this
+ * header for convenience.
+ */
+
+enum
+{
+	ai_gain_16 = 0,
+	ai_gain_8,
+	ai_gain_14,
+	ai_gain_4,
+	ai_gain_611x,
+	ai_gain_622x,
+	ai_gain_628x,
+	ai_gain_6143
+};
+enum caldac_enum
+{
+	caldac_none=0,
+	mb88341,
+	dac8800,
+	dac8043,
+	ad8522,
+	ad8804,
+	ad8842,
+	ad8804_debug
+};
+enum ni_reg_type
+{
+	ni_reg_normal = 0x0,
+	ni_reg_611x = 0x1,
+	ni_reg_6711 = 0x2,
+	ni_reg_6713 = 0x4,
+	ni_reg_67xx_mask = 0x6,
+	ni_reg_6xxx_mask = 0x7,
+	ni_reg_622x = 0x8,
+	ni_reg_625x = 0x10,
+	ni_reg_628x = 0x18,
+	ni_reg_m_series_mask = 0x18,
+	ni_reg_6143 = 0x20
+};
+
+/* M Series registers offsets */
+#define M_Offset_CDIO_DMA_Select		0x7 /* write */
+#define M_Offset_SCXI_Status			0x7 /* read */
+#define M_Offset_AI_AO_Select			0x9 /* write, same offset as e-series */
+#define M_Offset_SCXI_Serial_Data_In		0x9 /* read */
+#define M_Offset_G0_G1_Select			0xb /* write, same offset as e-series */
+#define M_Offset_Misc_Command			0xf
+#define M_Offset_SCXI_Serial_Data_Out		0x11
+#define M_Offset_SCXI_Control			0x13
+#define M_Offset_SCXI_Output_Enable		0x15
+#define M_Offset_AI_FIFO_Data			0x1c
+#define M_Offset_Static_Digital_Output		0x24 /* write */
+#define M_Offset_Static_Digital_Input		0x24 /* read */
+#define M_Offset_DIO_Direction			0x28
+#define M_Offset_Cal_PWM			0x40
+#define M_Offset_AI_Config_FIFO_Data		0x5e
+#define M_Offset_Interrupt_C_Enable		0x88 /* write */
+#define M_Offset_Interrupt_C_Status		0x88 /* read */
+#define M_Offset_Analog_Trigger_Control		0x8c
+#define M_Offset_AO_Serial_Interrupt_Enable	0xa0
+#define M_Offset_AO_Serial_Interrupt_Ack	0xa1 /* write */
+#define M_Offset_AO_Serial_Interrupt_Status	0xa1 /* read */
+#define M_Offset_AO_Calibration			0xa3
+#define M_Offset_AO_FIFO_Data			0xa4
+#define M_Offset_PFI_Filter			0xb0
+#define M_Offset_RTSI_Filter			0xb4
+#define M_Offset_SCXI_Legacy_Compatibility	0xbc
+#define M_Offset_Interrupt_A_Ack		0x104 /* write */
+#define M_Offset_AI_Status_1			0x104 /* read */
+#define M_Offset_Interrupt_B_Ack		0x106 /* write */
+#define M_Offset_AO_Status_1			0x106 /* read */
+#define M_Offset_AI_Command_2			0x108 /* write */
+#define M_Offset_G01_Status			0x108 /* read */
+#define M_Offset_AO_Command_2			0x10a
+#define M_Offset_AO_Status_2			0x10c /* read */
+#define M_Offset_G0_Command			0x10c /* write */
+#define M_Offset_G1_Command			0x10e /* write */
+#define M_Offset_G0_HW_Save			0x110
+#define M_Offset_G0_HW_Save_High		0x110
+#define M_Offset_AI_Command_1			0x110
+#define M_Offset_G0_HW_Save_Low			0x112
+#define M_Offset_AO_Command_1			0x112
+#define M_Offset_G1_HW_Save			0x114
+#define M_Offset_G1_HW_Save_High		0x114
+#define M_Offset_G1_HW_Save_Low			0x116
+#define M_Offset_AI_Mode_1			0x118
+#define M_Offset_G0_Save			0x118
+#define M_Offset_G0_Save_High			0x118
+#define M_Offset_AI_Mode_2			0x11a
+#define M_Offset_G0_Save_Low			0x11a
+#define M_Offset_AI_SI_Load_A			0x11c
+#define M_Offset_G1_Save			0x11c
+#define M_Offset_G1_Save_High			0x11c
+#define M_Offset_G1_Save_Low			0x11e
+#define M_Offset_AI_SI_Load_B			0x120 /* write */
+#define M_Offset_AO_UI_Save			0x120 /* read */
+#define M_Offset_AI_SC_Load_A			0x124 /* write */
+#define M_Offset_AO_BC_Save			0x124 /* read */
+#define M_Offset_AI_SC_Load_B			0x128 /* write */
+#define M_Offset_AO_UC_Save			0x128 /* read */
+#define M_Offset_AI_SI2_Load_A			0x12c
+#define M_Offset_AI_SI2_Load_B			0x130
+#define M_Offset_G0_Mode			0x134
+#define M_Offset_G1_Mode			0x136 /* write */
+#define M_Offset_Joint_Status_1			0x136 /* read */
+#define M_Offset_G0_Load_A			0x138
+#define M_Offset_Joint_Status_2			0x13a
+#define M_Offset_G0_Load_B			0x13c
+#define M_Offset_G1_Load_A			0x140
+#define M_Offset_G1_Load_B			0x144
+#define M_Offset_G0_Input_Select		0x148
+#define M_Offset_G1_Input_Select		0x14a
+#define M_Offset_AO_Mode_1			0x14c
+#define M_Offset_AO_Mode_2			0x14e
+#define M_Offset_AO_UI_Load_A			0x150
+#define M_Offset_AO_UI_Load_B			0x154
+#define M_Offset_AO_BC_Load_A			0x158
+#define M_Offset_AO_BC_Load_B			0x15c
+#define M_Offset_AO_UC_Load_A			0x160
+#define M_Offset_AO_UC_Load_B			0x164
+#define M_Offset_Clock_and_FOUT			0x170
+#define M_Offset_IO_Bidirection_Pin		0x172
+#define M_Offset_RTSI_Trig_Direction		0x174
+#define M_Offset_Interrupt_Control		0x176
+#define M_Offset_AI_Output_Control		0x178
+#define M_Offset_Analog_Trigger_Etc		0x17a
+#define M_Offset_AI_START_STOP_Select		0x17c
+#define M_Offset_AI_Trigger_Select		0x17e
+#define M_Offset_AI_SI_Save			0x180 /* read */
+#define M_Offset_AI_DIV_Load_A			0x180 /* write */
+#define M_Offset_AI_SC_Save			0x184 /* read */
+#define M_Offset_AO_Start_Select		0x184 /* write */
+#define M_Offset_AO_Trigger_Select		0x186
+#define M_Offset_AO_Mode_3			0x18c
+#define M_Offset_G0_Autoincrement		0x188
+#define M_Offset_G1_Autoincrement		0x18a
+#define M_Offset_Joint_Reset			0x190
+#define M_Offset_Interrupt_A_Enable		0x192
+#define M_Offset_Interrupt_B_Enable		0x196
+#define M_Offset_AI_Personal			0x19a
+#define M_Offset_AO_Personal			0x19c
+#define M_Offset_RTSI_Trig_A_Output		0x19e
+#define M_Offset_RTSI_Trig_B_Output		0x1a0
+#define M_Offset_RTSI_Shared_MUX		0x1a2
+#define M_Offset_AO_Output_Control		0x1ac
+#define M_Offset_AI_Mode_3			0x1ae
+#define M_Offset_Configuration_Memory_Clear	0x1a4
+#define M_Offset_AI_FIFO_Clear			0x1a6
+#define M_Offset_AO_FIFO_Clear			0x1a8
+#define M_Offset_G0_Counting_Mode		0x1b0
+#define M_Offset_G1_Counting_Mode		0x1b2
+#define M_Offset_G0_Second_Gate			0x1b4
+#define M_Offset_G1_Second_Gate			0x1b6
+#define M_Offset_G0_DMA_Config			0x1b8 /* write */
+#define M_Offset_G0_DMA_Status			0x1b8 /* read */
+#define M_Offset_G1_DMA_Config			0x1ba /* write */
+#define M_Offset_G1_DMA_Status			0x1ba /* read */
+#define M_Offset_G0_MSeries_ABZ			0x1c0
+#define M_Offset_G1_MSeries_ABZ			0x1c2
+#define M_Offset_Clock_and_Fout2		0x1c4
+#define M_Offset_PLL_Control			0x1c6
+#define M_Offset_PLL_Status			0x1c8
+#define M_Offset_PFI_Output_Select_1		0x1d0
+#define M_Offset_PFI_Output_Select_2		0x1d2
+#define M_Offset_PFI_Output_Select_3		0x1d4
+#define M_Offset_PFI_Output_Select_4		0x1d6
+#define M_Offset_PFI_Output_Select_5		0x1d8
+#define M_Offset_PFI_Output_Select_6		0x1da
+#define M_Offset_PFI_DI				0x1dc
+#define M_Offset_PFI_DO				0x1de
+#define M_Offset_AI_Config_FIFO_Bypass		0x218
+#define M_Offset_SCXI_DIO_Enable		0x21c
+#define M_Offset_CDI_FIFO_Data			0x220 /* read */
+#define M_Offset_CDO_FIFO_Data			0x220 /* write */
+#define M_Offset_CDIO_Status			0x224 /* read */
+#define M_Offset_CDIO_Command			0x224 /* write */
+#define M_Offset_CDI_Mode			0x228
+#define M_Offset_CDO_Mode			0x22c
+#define M_Offset_CDI_Mask_Enable		0x230
+#define M_Offset_CDO_Mask_Enable		0x234
+#define M_Offset_AO_Waveform_Order(x)		(0xc2 + 0x4 * x)
+#define M_Offset_AO_Config_Bank(x)		(0xc3 + 0x4 * x)
+#define M_Offset_DAC_Direct_Data(x)		(0xc0 + 0x4 * x)
+#define M_Offset_Gen_PWM(x)			(0x44 + 0x2 * x)
+
+static inline int M_Offset_Static_AI_Control(int i)
+{
+	int offset[] =
+	{
+		0x64,
+		0x261,
+		0x262,
+		0x263,
+	};
+	if(((unsigned)i) >= sizeof(offset) / sizeof(offset[0]))
+	{
+		rtdm_printk("%s: invalid channel=%i\n", __FUNCTION__, i);
+		return offset[0];
+	}
+	return offset[i];
+}
+static inline int M_Offset_AO_Reference_Attenuation(int channel)
+{
+	int offset[] =
+	{
+		0x264,
+		0x265,
+		0x266,
+		0x267
+	};
+	if(((unsigned)channel) >= sizeof(offset) / sizeof(offset[0]))
+	{
+		rtdm_printk("%s: invalid channel=%i\n", __FUNCTION__, channel);
+		return offset[0];
+	}
+	return offset[channel];
+}
+static inline unsigned M_Offset_PFI_Output_Select(unsigned n)
+{
+	if(n < 1 || n > NUM_PFI_OUTPUT_SELECT_REGS)
+	{
+		rtdm_printk("%s: invalid pfi output select register=%i\n", __FUNCTION__, n);
+		return M_Offset_PFI_Output_Select_1;
+	}
+	return M_Offset_PFI_Output_Select_1 + (n - 1) * 2;
+}
+
+#define MSeries_AI_Config_Channel_Type_Mask			(0x7 << 6)
+#define MSeries_AI_Config_Channel_Type_Calibration_Bits		0x0
+#define MSeries_AI_Config_Channel_Type_Differential_Bits	(0x1 << 6)
+#define MSeries_AI_Config_Channel_Type_Common_Ref_Bits		(0x2 << 6)
+#define MSeries_AI_Config_Channel_Type_Ground_Ref_Bits		(0x3 << 6)
+#define MSeries_AI_Config_Channel_Type_Aux_Bits			(0x5 << 6)
+#define MSeries_AI_Config_Channel_Type_Ghost_Bits		(0x7 << 6)
+#define MSeries_AI_Config_Polarity_Bit				0x1000 /* 0 for 2's complement encoding */
+#define MSeries_AI_Config_Dither_Bit				0x2000
+#define MSeries_AI_Config_Last_Channel_Bit			0x4000
+#define MSeries_AI_Config_Channel_Bits(x)			(x & 0xf)
+#define MSeries_AI_Config_Gain_Bits(x)				((x & 0x7) << 9)
+
+static inline
+unsigned int MSeries_AI_Config_Bank_Bits(unsigned int reg_type,
+					 unsigned int channel)
+{
+	unsigned int bits = channel & 0x30;
+	if (reg_type == ni_reg_622x) {
+		if (channel & 0x40)
+			bits |= 0x400;
+	}
+	return bits;
+}
+
+#define MSeries_PLL_In_Source_Select_RTSI0_Bits			0xb
+#define MSeries_PLL_In_Source_Select_Star_Trigger_Bits		0x14
+#define MSeries_PLL_In_Source_Select_RTSI7_Bits			0x1b
+#define MSeries_PLL_In_Source_Select_PXI_Clock10		0x1d
+#define MSeries_PLL_In_Source_Select_Mask			0x1f
+#define MSeries_Timebase1_Select_Bit				0x20 /* use PLL for timebase 1 */
+#define MSeries_Timebase3_Select_Bit				0x40 /* use PLL for timebase 3 */
+/* Use 10MHz instead of 20MHz for RTSI clock frequency.  Appears
+   to have no effect, at least on pxi-6281, which always uses
+   20MHz rtsi clock frequency */
+#define MSeries_RTSI_10MHz_Bit					0x80
+
+static inline
+unsigned int MSeries_PLL_In_Source_Select_RTSI_Bits(unsigned int RTSI_channel)
+{
+	if(RTSI_channel > 7)
+	{
+		rtdm_printk("%s: bug, invalid RTSI_channel=%i\n", __FUNCTION__, RTSI_channel);
+		return 0;
+	}
+	if(RTSI_channel == 7) return MSeries_PLL_In_Source_Select_RTSI7_Bits;
+	else return MSeries_PLL_In_Source_Select_RTSI0_Bits + RTSI_channel;
+}
+
+#define MSeries_PLL_Enable_Bit					0x1000
+#define MSeries_PLL_VCO_Mode_200_325MHz_Bits			0x0
+#define MSeries_PLL_VCO_Mode_175_225MHz_Bits			0x2000
+#define MSeries_PLL_VCO_Mode_100_225MHz_Bits			0x4000
+#define MSeries_PLL_VCO_Mode_75_150MHz_Bits			0x6000
+
+static inline
+unsigned int MSeries_PLL_Divisor_Bits(unsigned int divisor)
+{
+	static const unsigned int max_divisor = 0x10;
+	if(divisor < 1 || divisor > max_divisor)
+	{
+		rtdm_printk("%s: bug, invalid divisor=%i\n", __FUNCTION__, divisor);
+		return 0;
+	}
+	return (divisor & 0xf) << 8;
+}
+static inline
+unsigned int MSeries_PLL_Multiplier_Bits(unsigned int multiplier)
+{
+	static const unsigned int max_multiplier = 0x100;
+	if(multiplier < 1 || multiplier > max_multiplier)
+	{
+		rtdm_printk("%s: bug, invalid multiplier=%i\n", __FUNCTION__, multiplier);
+		return 0;
+	}
+	return multiplier & 0xff;
+}
+
+#define MSeries_PLL_Locked_Bit				0x1
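+
+/*
+ * Usage sketch (illustrative only): the PLL control word written to
+ * M_Offset_PLL_Control is composed from the bits above, for instance
+ *
+ *	ni_writew(MSeries_PLL_Enable_Bit |
+ *		  MSeries_PLL_VCO_Mode_75_150MHz_Bits |
+ *		  MSeries_PLL_Divisor_Bits(divisor) |
+ *		  MSeries_PLL_Multiplier_Bits(multiplier),
+ *		  M_Offset_PLL_Control);
+ *
+ * after which MSeries_PLL_Locked_Bit can be polled in M_Offset_PLL_Status.
+ * The VCO mode picked here is only an example; the real driver chooses it
+ * from the target frequency.
+ */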
+
+#define MSeries_AI_Bypass_Channel_Mask			0x7
+#define MSeries_AI_Bypass_Bank_Mask			0x78
+#define MSeries_AI_Bypass_Cal_Sel_Pos_Mask		0x380
+#define MSeries_AI_Bypass_Cal_Sel_Neg_Mask		0x1c00
+#define MSeries_AI_Bypass_Mode_Mux_Mask			0x6000
+#define MSeries_AO_Bypass_AO_Cal_Sel_Mask		0x38000
+#define MSeries_AI_Bypass_Gain_Mask			0x1c0000
+#define MSeries_AI_Bypass_Dither_Bit			0x200000
+#define MSeries_AI_Bypass_Polarity_Bit			0x400000 /* 0 for 2's complement encoding */
+#define MSeries_AI_Bypass_Config_FIFO_Bit		0x80000000
+#define MSeries_AI_Bypass_Cal_Sel_Pos_Bits(x)		((x << 7) & \
+							 MSeries_AI_Bypass_Cal_Sel_Pos_Mask)
+#define MSeries_AI_Bypass_Cal_Sel_Neg_Bits(x)		((x << 10) & \
+							 MSeries_AI_Bypass_Cal_Sel_Pos_Mask)
+#define MSeries_AI_Bypass_Gain_Bits(x)			((x << 18) & \
+							 MSeries_AI_Bypass_Gain_Mask)
+
+#define MSeries_AO_DAC_Offset_Select_Mask		0x7
+#define MSeries_AO_DAC_Offset_0V_Bits			0x0
+#define MSeries_AO_DAC_Offset_5V_Bits			0x1
+#define MSeries_AO_DAC_Reference_Mask			0x38
+#define MSeries_AO_DAC_Reference_10V_Internal_Bits	0x0
+#define MSeries_AO_DAC_Reference_5V_Internal_Bits	0x8
+#define MSeries_AO_Update_Timed_Bit			0x40
+#define MSeries_AO_Bipolar_Bit				0x80 /* turns on 2's complement encoding */
+
+#define MSeries_Attenuate_x5_Bit			0x1
+
+#define MSeries_Cal_PWM_High_Time_Bits(x)		((x << 16) & 0xffff0000)
+#define MSeries_Cal_PWM_Low_Time_Bits(x)		(x & 0xffff)
+
+#define MSeries_PFI_Output_Select_Mask(x)		(0x1f << (x % 3) * 5)
+#define MSeries_PFI_Output_Select_Bits(x, y)		((y & 0x1f) << ((x % 3) * 5))
+/* inverse of MSeries_PFI_Output_Select_Bits */
+#define MSeries_PFI_Output_Select_Source(x, y)		((y >> ((x % 3) * 5)) & 0x1f)
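+
+/*
+ * Usage sketch (illustrative only): each of the six PFI_Output_Select
+ * registers packs three PFI channels, five select bits per channel, so
+ * programming the output source of PFI channel `chan' would look roughly
+ * like:
+ *
+ *	unsigned idx = chan / 3;
+ *
+ *	devpriv->pfi_output_select_reg[idx] &=
+ *		~MSeries_PFI_Output_Select_Mask(chan);
+ *	devpriv->pfi_output_select_reg[idx] |=
+ *		MSeries_PFI_Output_Select_Bits(chan, source);
+ *	ni_writew(devpriv->pfi_output_select_reg[idx],
+ *		  M_Offset_PFI_Output_Select(idx + 1));
+ *
+ * devpriv, pfi_output_select_reg[] and ni_writew() are defined further down
+ * in this header.
+ */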
+
+#define Gi_DMA_BankSW_Error_Bit				0x10
+#define Gi_DMA_Reset_Bit				0x8
+#define Gi_DMA_Int_Enable_Bit				0x4
+#define Gi_DMA_Write_Bit				0x2
+#define Gi_DMA_Enable_Bit				0x1
+
+#define MSeries_PFI_Filter_Select_Mask(x)		(0x3 << (x * 2))
+#define MSeries_PFI_Filter_Select_Bits(x, y)		((y << (x * 2)) & \
+							 MSeries_PFI_Filter_Select_Mask(x))
+
+/* CDIO DMA select bits */
+#define CDI_DMA_Select_Shift	0
+#define CDI_DMA_Select_Mask	0xf
+#define CDO_DMA_Select_Shift	4
+#define CDO_DMA_Select_Mask	0xf << CDO_DMA_Select_Shift
+
+/* CDIO status bits */
+#define CDO_FIFO_Empty_Bit	0x1
+#define CDO_FIFO_Full_Bit	0x2
+#define CDO_FIFO_Request_Bit	0x4
+#define CDO_Overrun_Bit		0x8
+#define CDO_Underflow_Bit	0x10
+#define CDI_FIFO_Empty_Bit	0x10000
+#define CDI_FIFO_Full_Bit	0x20000
+#define CDI_FIFO_Request_Bit	0x40000
+#define CDI_Overrun_Bit		0x80000
+#define CDI_Overflow_Bit	0x100000
+
+/* CDIO command bits */
+#define CDO_Disarm_Bit					0x1
+#define CDO_Arm_Bit					0x2
+#define CDI_Disarm_Bit					0x4
+#define CDI_Arm_Bit					0x8
+#define CDO_Reset_Bit					0x10
+#define CDI_Reset_Bit					0x20
+#define CDO_Error_Interrupt_Enable_Set_Bit		0x40
+#define CDO_Error_Interrupt_Enable_Clear_Bit		0x80
+#define CDI_Error_Interrupt_Enable_Set_Bit		0x100
+#define CDI_Error_Interrupt_Enable_Clear_Bit		0x200
+#define CDO_FIFO_Request_Interrupt_Enable_Set_Bit	0x400
+#define CDO_FIFO_Request_Interrupt_Enable_Clear_Bit	0x800
+#define CDI_FIFO_Request_Interrupt_Enable_Set_Bit	0x1000
+#define CDI_FIFO_Request_Interrupt_Enable_Clear_Bit	0x2000
+#define CDO_Error_Interrupt_Confirm_Bit			0x4000
+#define CDI_Error_Interrupt_Confirm_Bit			0x8000
+#define CDO_Empty_FIFO_Interrupt_Enable_Set_Bit		0x10000
+#define CDO_Empty_FIFO_Interrupt_Enable_Clear_Bit	0x20000
+#define CDO_SW_Update_Bit				0x80000
+#define CDI_SW_Update_Bit				0x100000
+
+/* CDIO mode bits */
+#define CDI_Sample_Source_Select_Mask	0x3f
+#define CDI_Halt_On_Error_Bit		0x200
+/* sample clock on falling edge */
+#define CDI_Polarity_Bit		0x400
+/* set for half full mode, clear for not empty mode */
+#define CDI_FIFO_Mode_Bit		0x800
+/* data lanes specify which dio channels map to byte or word accesses
+   to the dio fifos */
+#define CDI_Data_Lane_Mask		0x3000
+#define CDI_Data_Lane_0_15_Bits		0x0
+#define CDI_Data_Lane_16_31_Bits	0x1000
+#define CDI_Data_Lane_0_7_Bits		0x0
+#define CDI_Data_Lane_8_15_Bits		0x1000
+#define CDI_Data_Lane_16_23_Bits	0x2000
+#define CDI_Data_Lane_24_31_Bits	0x3000
+
+/* CDO mode bits */
+#define CDO_Sample_Source_Select_Mask	0x3f
+#define CDO_Retransmit_Bit		0x100
+#define CDO_Halt_On_Error_Bit		0x200
+/* sample clock on falling edge */
+#define CDO_Polarity_Bit		0x400
+/* set for half full mode, clear for not full mode */
+#define CDO_FIFO_Mode_Bit		0x800
+/* data lanes specify which dio channels map to byte or word accesses
+   to the dio fifos */
+#define CDO_Data_Lane_Mask		0x3000
+#define CDO_Data_Lane_0_15_Bits		0x0
+#define CDO_Data_Lane_16_31_Bits	0x1000
+#define CDO_Data_Lane_0_7_Bits		0x0
+#define CDO_Data_Lane_8_15_Bits		0x1000
+#define CDO_Data_Lane_16_23_Bits	0x2000
+#define CDO_Data_Lane_24_31_Bits	0x3000
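+
+/*
+ * Usage sketch (illustrative only): a CDO mode word combines the sample
+ * clock selection, its polarity and the FIFO threshold mode, e.g.
+ *
+ *	ni_writel((sample_source & CDO_Sample_Source_Select_Mask) |
+ *		  CDO_Halt_On_Error_Bit | CDO_FIFO_Mode_Bit,
+ *		  M_Offset_CDO_Mode);
+ *
+ * The particular bits chosen here are just an example; ni_writel() and
+ * M_Offset_CDO_Mode are defined elsewhere in this header.
+ */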
+
+/* Interrupt C bits */
+#define Interrupt_Group_C_Enable_Bit	0x1
+#define Interrupt_Group_C_Status_Bit	0x1
+
+#define M_SERIES_EEPROM_SIZE 1024
+
+typedef struct ni_board_struct{
+	unsigned short device_id;
+	int isapnp_id;
+	char *name;
+
+	int n_adchan;
+	int adbits;
+
+	int ai_fifo_depth;
+	unsigned int alwaysdither : 1;
+	int gainlkup;
+	int ai_speed;
+
+	int n_aochan;
+	int aobits;
+	struct a4l_rngdesc *ao_range_table;
+	int ao_fifo_depth;
+
+	unsigned ao_speed;
+
+	unsigned num_p0_dio_channels;
+
+	int reg_type;
+	unsigned int ao_unipolar : 1;
+	unsigned int has_8255 : 1;
+	unsigned int has_analog_trig : 1;
+
+	enum caldac_enum caldac[3];
+} ni_board;
+
+#define n_ni_boards  (sizeof(ni_boards)/sizeof(ni_board))
+
+#define MAX_N_CALDACS 34
+#define MAX_N_AO_CHAN 8
+#define NUM_GPCT 2
+
+#define NI_PRIVATE_COMMON					\
+	uint16_t (*stc_readw)(struct a4l_device *dev, int reg);	\
+	uint32_t (*stc_readl)(struct a4l_device *dev, int reg);	\
+	void (*stc_writew)(struct a4l_device *dev, uint16_t value, int reg);	\
+	void (*stc_writel)(struct a4l_device *dev, uint32_t value, int reg);	\
+	\
+	int dio_state;						\
+	int pfi_state;						\
+	int io_bits;						\
+	unsigned short dio_output;				\
+	unsigned short dio_control;				\
+	int ao0p,ao1p;						\
+	int lastchan;						\
+	int last_do;						\
+	int rt_irq;						\
+	int irq_polarity;					\
+	int irq_pin;						\
+	int aimode;						\
+	int ai_continuous;					\
+	int blocksize;						\
+	int n_left;						\
+	unsigned int ai_calib_source;				\
+	unsigned int ai_calib_source_enabled;			\
+	rtdm_lock_t window_lock; \
+	rtdm_lock_t soft_reg_copy_lock; \
+	rtdm_lock_t mite_channel_lock; \
+								\
+	int changain_state;					\
+	unsigned int changain_spec;				\
+								\
+	unsigned int caldac_maxdata_list[MAX_N_CALDACS];	\
+	unsigned short ao[MAX_N_AO_CHAN];					\
+	unsigned short caldacs[MAX_N_CALDACS];				\
+								\
+	unsigned short ai_cmd2;	\
+								\
+	unsigned short ao_conf[MAX_N_AO_CHAN];				\
+	unsigned short ao_mode1;				\
+	unsigned short ao_mode2;				\
+	unsigned short ao_mode3;				\
+	unsigned short ao_cmd1;					\
+	unsigned short ao_cmd2;					\
+	unsigned short ao_cmd3;					\
+	unsigned short ao_trigger_select;			\
+								\
+	struct ni_gpct_device *counter_dev;	\
+	unsigned short an_trig_etc_reg;				\
+								\
+	unsigned ai_offset[512];				\
+								\
+	unsigned long serial_interval_ns;                       \
+	unsigned char serial_hw_mode;                           \
+	unsigned short clock_and_fout;				\
+	unsigned short clock_and_fout2;				\
+								\
+	unsigned short int_a_enable_reg;			\
+	unsigned short int_b_enable_reg;			\
+	unsigned short io_bidirection_pin_reg;			\
+	unsigned short rtsi_trig_direction_reg;			\
+	unsigned short rtsi_trig_a_output_reg; \
+	unsigned short rtsi_trig_b_output_reg; \
+	unsigned short pfi_output_select_reg[NUM_PFI_OUTPUT_SELECT_REGS]; \
+	unsigned short ai_ao_select_reg; \
+	unsigned short g0_g1_select_reg; \
+	unsigned short cdio_dma_select_reg; \
+	\
+	unsigned clock_ns; \
+	unsigned clock_source; \
+	\
+	unsigned short atrig_mode;				\
+	unsigned short atrig_high;				\
+	unsigned short atrig_low;				\
+	\
+	unsigned short pwm_up_count;	\
+	unsigned short pwm_down_count;	\
+	\
+	sampl_t ai_fifo_buffer[0x2000];				\
+	uint8_t eeprom_buffer[M_SERIES_EEPROM_SIZE]; \
+	\
+	struct mite_struct *mite; \
+	struct mite_channel *ai_mite_chan; \
+	struct mite_channel *ao_mite_chan;\
+	struct mite_channel *cdo_mite_chan;\
+	struct mite_dma_descriptor_ring *ai_mite_ring; \
+	struct mite_dma_descriptor_ring *ao_mite_ring; \
+	struct mite_dma_descriptor_ring *cdo_mite_ring; \
+	struct mite_dma_descriptor_ring *gpct_mite_ring[NUM_GPCT]; \
+	subd_8255_t subd_8255
+
+
+typedef struct {
+	ni_board *board_ptr;
+	NI_PRIVATE_COMMON;
+} ni_private;
+
+#define devpriv ((ni_private *)dev->priv)
+#define boardtype (*(ni_board *)devpriv->board_ptr)
+
+/* How we access registers */
+
+#define ni_writel(a,b)	(writel((a), devpriv->mite->daq_io_addr + (b)))
+#define ni_readl(a)	(readl(devpriv->mite->daq_io_addr + (a)))
+#define ni_writew(a,b)	(writew((a), devpriv->mite->daq_io_addr + (b)))
+#define ni_readw(a)	(readw(devpriv->mite->daq_io_addr + (a)))
+#define ni_writeb(a,b)	(writeb((a), devpriv->mite->daq_io_addr + (b)))
+#define ni_readb(a)	(readb(devpriv->mite->daq_io_addr + (a)))
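+
+/*
+ * Usage sketch (illustrative only): on M Series boards the registers are
+ * mapped directly and accessed through the macros above with the
+ * M_Offset_* constants, e.g.
+ *
+ *	ni_writew(devpriv->clock_and_fout, M_Offset_Clock_and_FOUT);
+ *	status = ni_readw(M_Offset_AI_Status_1);
+ *
+ * E series boards instead go through the devpriv->stc_readw()/stc_writew()
+ * hooks, which (as sketched near Window_Address above) are assumed to use
+ * the windowed access scheme.
+ */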
+
+/* INSN_CONFIG_SET_CLOCK_SRC argument for NI cards */
+#define NI_FREQ_OUT_TIMEBASE_1_DIV_2_CLOCK_SRC	0 /* 10 MHz */
+#define NI_FREQ_OUT_TIMEBASE_2_CLOCK_SRC	1 /* 100 KHz */
+
+#endif /* __ANALOGY_NI_STC_H__ */
+++ linux-patched/drivers/xenomai/analogy/national_instruments/Makefile	2022-03-21 12:58:31.093872345 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/drivers/xenomai/analogy/national_instruments/tio_common.c	1970-01-01 01:00:00.000000000 +0100
+
+ccflags-y += -I$(srctree)/drivers/xenomai/analogy
+
+obj-$(CONFIG_XENO_DRIVERS_ANALOGY_NI_MITE) += analogy_ni_mite.o
+obj-$(CONFIG_XENO_DRIVERS_ANALOGY_NI_TIO) += analogy_ni_tio.o
+obj-$(CONFIG_XENO_DRIVERS_ANALOGY_NI_MIO) += analogy_ni_mio.o
+obj-$(CONFIG_XENO_DRIVERS_ANALOGY_NI_PCIMIO) += analogy_ni_pcimio.o
+obj-$(CONFIG_XENO_DRIVERS_ANALOGY_NI_670x) += analogy_ni_670x.o
+obj-$(CONFIG_XENO_DRIVERS_ANALOGY_NI_660x) += analogy_ni_660x.o
+
+analogy_ni_mite-y := mite.o
+analogy_ni_tio-y := tio_common.o
+analogy_ni_mio-y := mio_common.o
+analogy_ni_pcimio-y := pcimio.o
+analogy_ni_670x-y := ni_670x.o
+analogy_ni_660x-y := ni_660x.o
+++ linux-patched/drivers/xenomai/analogy/national_instruments/tio_common.c	2022-03-21 12:58:31.086872413 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/drivers/xenomai/analogy/national_instruments/ni_tio.h	1970-01-01 01:00:00.000000000 +0100
+/*
+ * Hardware driver for NI general purpose counter
+ * Copyright (C) 2006 Frank Mori Hess <fmhess@users.sourceforge.net>
+ *
+ * This code is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published
+ * by the Free Software Foundation; either version 2 of the License,
+ * or (at your option) any later version.
+ *
+ * This code is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Xenomai; if not, write to the Free Software Foundation,
+ * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ * Description: National Instruments general purpose counters
+ * This module is not used directly by end-users.  Rather, it is used
+ * by other drivers (for example ni_660x and ni_pcimio) to provide
+ * support for NI's general purpose counters.  It was originally based
+ * on the counter code from ni_660x.c and ni_mio_common.c.
+ *
+ * Author:
+ * J.P. Mellor <jpmellor@rose-hulman.edu>
+ * Herman.Bruyninckx@mech.kuleuven.ac.be
+ * Wim.Meeussen@mech.kuleuven.ac.be,
+ * Klaas.Gadeyne@mech.kuleuven.ac.be,
+ * Frank Mori Hess <fmhess@users.sourceforge.net>
+ *
+ * References:
+ * DAQ 660x Register-Level Programmer Manual  (NI 370505A-01)
+ * DAQ 6601/6602 User Manual (NI 322137B-01)
+ * 340934b.pdf  DAQ-STC reference manual
+ *
+ * TODO:
+ * - Support use of both banks X and Y
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/io.h>
+#include <rtdm/analogy/device.h>
+
+#include "ni_tio.h"
+#include "ni_mio.h"
+
+static inline void write_register(struct ni_gpct *counter,
+				  unsigned int bits, enum ni_gpct_register reg)
+{
+	BUG_ON(reg >= NITIO_Num_Registers);
+	counter->counter_dev->write_register(counter, bits, reg);
+}
+
+static inline unsigned int read_register(struct ni_gpct *counter,
+				     enum ni_gpct_register reg)
+{
+	BUG_ON(reg >= NITIO_Num_Registers);
+	return counter->counter_dev->read_register(counter, reg);
+}
+
+struct ni_gpct_device *a4l_ni_gpct_device_construct(struct a4l_device * dev,
+	void (*write_register) (struct ni_gpct * counter, unsigned int bits,
+		enum ni_gpct_register reg),
+	unsigned int (*read_register) (struct ni_gpct * counter,
+		enum ni_gpct_register reg), enum ni_gpct_variant variant,
+	unsigned int num_counters)
+{
+	struct ni_gpct_device *counter_dev =
+		kmalloc(sizeof(struct ni_gpct_device), GFP_KERNEL);
+	if (counter_dev == NULL)
+		return NULL;
+
+	memset(counter_dev, 0, sizeof(struct ni_gpct_device));
+
+	counter_dev->dev = dev;
+	counter_dev->write_register = write_register;
+	counter_dev->read_register = read_register;
+	counter_dev->variant = variant;
+	rtdm_lock_init(&counter_dev->regs_lock);
+	BUG_ON(num_counters == 0);
+
+	counter_dev->counters =
+		kmalloc(sizeof(struct ni_gpct *) * num_counters, GFP_KERNEL);
+
+	if (counter_dev->counters == NULL) {
+		kfree(counter_dev);
+		return NULL;
+	}
+
+	memset(counter_dev->counters, 0, sizeof(struct ni_gpct *) * num_counters);
+
+	counter_dev->num_counters = num_counters;
+	return counter_dev;
+}
+
+void a4l_ni_gpct_device_destroy(struct ni_gpct_device *counter_dev)
+{
+	if (counter_dev->counters == NULL)
+		return;
+	kfree(counter_dev->counters);
+	kfree(counter_dev);
+}
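+
+/*
+ * Usage sketch (illustrative only): a board driver typically builds its
+ * counter device once at attach time, handing over its own register
+ * accessors:
+ *
+ *	devpriv->counter_dev = a4l_ni_gpct_device_construct(dev,
+ *		my_gpct_write_register, my_gpct_read_register,
+ *		ni_gpct_variant_m_series, num_counters);
+ *	if (devpriv->counter_dev == NULL)
+ *		return -ENOMEM;
+ *
+ * my_gpct_write_register/my_gpct_read_register are hypothetical callbacks
+ * with the signatures expected by the constructor; the real callers are the
+ * mio and 660x drivers mentioned in the header comment above.
+ */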
+
+static
+int ni_tio_counting_mode_registers_present(const struct ni_gpct_device *counter_dev)
+{
+	switch (counter_dev->variant) {
+	case ni_gpct_variant_e_series:
+		return 0;
+		break;
+	case ni_gpct_variant_m_series:
+	case ni_gpct_variant_660x:
+		return 1;
+		break;
+	default:
+		BUG();
+		break;
+	}
+	return 0;
+}
+
+static
+int ni_tio_second_gate_registers_present(const struct ni_gpct_device *counter_dev)
+{
+	switch (counter_dev->variant) {
+	case ni_gpct_variant_e_series:
+		return 0;
+		break;
+	case ni_gpct_variant_m_series:
+	case ni_gpct_variant_660x:
+		return 1;
+		break;
+	default:
+		BUG();
+		break;
+	}
+	return 0;
+}
+
+static inline
+void ni_tio_set_bits_transient(struct ni_gpct *counter,
+			       enum ni_gpct_register register_index,
+			       unsigned int bit_mask,
+			       unsigned int bit_values,
+			       unsigned transient_bit_values)
+{
+	struct ni_gpct_device *counter_dev = counter->counter_dev;
+	unsigned long flags;
+
+	BUG_ON(register_index >= NITIO_Num_Registers);
+	rtdm_lock_get_irqsave(&counter_dev->regs_lock, flags);
+	counter_dev->regs[register_index] &= ~bit_mask;
+	counter_dev->regs[register_index] |= (bit_values & bit_mask);
+	write_register(counter,
+		       counter_dev->regs[register_index] | transient_bit_values,
+		       register_index);
+	mmiowb();
+	rtdm_lock_put_irqrestore(&counter_dev->regs_lock, flags);
+}
+
+/* ni_tio_set_bits() safely writes to registers whose bits may be
+   modified in interrupt context, or whose software copy may be read
+   in interrupt context. */
+static inline void ni_tio_set_bits(struct ni_gpct *counter,
+				   enum ni_gpct_register register_index,
+				   unsigned int bit_mask,
+				   unsigned int bit_values)
+{
+	ni_tio_set_bits_transient(counter,
+				  register_index,
+				  bit_mask, bit_values, 0x0);
+}
+
+/* ni_tio_get_soft_copy() safely reads the software copy of a register
+   that may also be read or modified from interrupt context. */
+static inline
+unsigned int ni_tio_get_soft_copy(const struct ni_gpct *counter,
+				  enum ni_gpct_register register_index)
+{
+	struct ni_gpct_device *counter_dev = counter->counter_dev;
+	unsigned long flags;
+	unsigned value;
+
+	BUG_ON(register_index >= NITIO_Num_Registers);
+	rtdm_lock_get_irqsave(&counter_dev->regs_lock, flags);
+	value = counter_dev->regs[register_index];
+	rtdm_lock_put_irqrestore(&counter_dev->regs_lock, flags);
+	return value;
+}
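+
+/*
+ * Usage sketch (illustrative only, contrived example): callers combine the
+ * two helpers above to update part of a shadowed register without
+ * disturbing the rest, e.g. forcing level gating only when it is not
+ * already selected:
+ *
+ *	unsigned mode = ni_tio_get_soft_copy(counter,
+ *		NITIO_Gi_Mode_Reg(counter->counter_index));
+ *	if ((mode & Gi_Gating_Mode_Mask) != Gi_Level_Gating_Bits)
+ *		ni_tio_set_bits(counter,
+ *			NITIO_Gi_Mode_Reg(counter->counter_index),
+ *			Gi_Gating_Mode_Mask, Gi_Level_Gating_Bits);
+ */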
+
+static void ni_tio_reset_count_and_disarm(struct ni_gpct *counter)
+{
+	write_register(counter, Gi_Reset_Bit(counter->counter_index),
+		       NITIO_Gxx_Joint_Reset_Reg(counter->counter_index));
+}
+
+void a4l_ni_tio_init_counter(struct ni_gpct *counter)
+{
+	struct ni_gpct_device *counter_dev = counter->counter_dev;
+
+	ni_tio_reset_count_and_disarm(counter);
+	/* Initialize counter registers */
+	counter_dev->regs[NITIO_Gi_Autoincrement_Reg(counter->counter_index)] =
+		0x0;
+	write_register(counter,
+		counter_dev->regs[NITIO_Gi_Autoincrement_Reg(counter->
+				counter_index)],
+		NITIO_Gi_Autoincrement_Reg(counter->counter_index));
+	ni_tio_set_bits(counter, NITIO_Gi_Command_Reg(counter->counter_index),
+		~0, Gi_Synchronize_Gate_Bit);
+	ni_tio_set_bits(counter, NITIO_Gi_Mode_Reg(counter->counter_index), ~0,
+		0);
+	counter_dev->regs[NITIO_Gi_LoadA_Reg(counter->counter_index)] = 0x0;
+	write_register(counter,
+		counter_dev->regs[NITIO_Gi_LoadA_Reg(counter->counter_index)],
+		NITIO_Gi_LoadA_Reg(counter->counter_index));
+	counter_dev->regs[NITIO_Gi_LoadB_Reg(counter->counter_index)] = 0x0;
+	write_register(counter,
+		counter_dev->regs[NITIO_Gi_LoadB_Reg(counter->counter_index)],
+		NITIO_Gi_LoadB_Reg(counter->counter_index));
+	ni_tio_set_bits(counter,
+		NITIO_Gi_Input_Select_Reg(counter->counter_index), ~0, 0);
+	if (ni_tio_counting_mode_registers_present(counter_dev)) {
+		ni_tio_set_bits(counter,
+			NITIO_Gi_Counting_Mode_Reg(counter->counter_index), ~0,
+			0);
+	}
+	if (ni_tio_second_gate_registers_present(counter_dev)) {
+		counter_dev->regs[NITIO_Gi_Second_Gate_Reg(counter->
+				counter_index)] = 0x0;
+		write_register(counter,
+			counter_dev->regs[NITIO_Gi_Second_Gate_Reg(counter->
+					counter_index)],
+			NITIO_Gi_Second_Gate_Reg(counter->counter_index));
+	}
+	ni_tio_set_bits(counter,
+		NITIO_Gi_DMA_Config_Reg(counter->counter_index), ~0, 0x0);
+	ni_tio_set_bits(counter,
+		NITIO_Gi_Interrupt_Enable_Reg(counter->counter_index), ~0, 0x0);
+}
+
+static lsampl_t ni_tio_counter_status(struct ni_gpct *counter)
+{
+	lsampl_t status = 0;
+	unsigned int bits;
+
+	bits = read_register(counter, NITIO_Gxx_Status_Reg(counter->counter_index));
+	if (bits & Gi_Armed_Bit(counter->counter_index)) {
+		status |= A4L_COUNTER_ARMED;
+		if (bits & Gi_Counting_Bit(counter->counter_index))
+			status |= A4L_COUNTER_COUNTING;
+	}
+	return status;
+}
+
+static
+uint64_t ni_tio_clock_period_ps(const struct ni_gpct *counter,
+				unsigned int generic_clock_source);
+static
+unsigned int ni_tio_generic_clock_src_select(const struct ni_gpct *counter);
+
+static void ni_tio_set_sync_mode(struct ni_gpct *counter, int force_alt_sync)
+{
+	struct ni_gpct_device *counter_dev = counter->counter_dev;
+	const unsigned counting_mode_reg =
+		NITIO_Gi_Counting_Mode_Reg(counter->counter_index);
+	static const uint64_t min_normal_sync_period_ps = 25000;
+	const uint64_t clock_period_ps = ni_tio_clock_period_ps(counter,
+		ni_tio_generic_clock_src_select(counter));
+
+	if (ni_tio_counting_mode_registers_present(counter_dev) == 0)
+		return;
+
+	switch (ni_tio_get_soft_copy(counter,
+			counting_mode_reg) & Gi_Counting_Mode_Mask) {
+	case Gi_Counting_Mode_QuadratureX1_Bits:
+	case Gi_Counting_Mode_QuadratureX2_Bits:
+	case Gi_Counting_Mode_QuadratureX4_Bits:
+	case Gi_Counting_Mode_Sync_Source_Bits:
+		force_alt_sync = 1;
+		break;
+	default:
+		break;
+	}
+
+	/*
+	 * It is not clear what to do if clock_period is unknown, so
+	 * we do not use the alternate sync bit in that case and let
+	 * the caller decide via the force_alt_sync parameter.
+	 */
+	if (force_alt_sync ||
+		(clock_period_ps
+			&& clock_period_ps < min_normal_sync_period_ps)) {
+		ni_tio_set_bits(counter, counting_mode_reg,
+			Gi_Alternate_Sync_Bit(counter_dev->variant),
+			Gi_Alternate_Sync_Bit(counter_dev->variant));
+	} else {
+		ni_tio_set_bits(counter, counting_mode_reg,
+			Gi_Alternate_Sync_Bit(counter_dev->variant), 0x0);
+	}
+}
+
+static int ni_tio_set_counter_mode(struct ni_gpct *counter, unsigned int mode)
+{
+	struct ni_gpct_device *counter_dev = counter->counter_dev;
+	unsigned mode_reg_mask;
+	unsigned mode_reg_values;
+	unsigned input_select_bits = 0;
+
+	/* these bits map directly on to the mode register */
+	static const unsigned mode_reg_direct_mask =
+		NI_GPCT_GATE_ON_BOTH_EDGES_BIT | NI_GPCT_EDGE_GATE_MODE_MASK |
+		NI_GPCT_STOP_MODE_MASK | NI_GPCT_OUTPUT_MODE_MASK |
+		NI_GPCT_HARDWARE_DISARM_MASK | NI_GPCT_LOADING_ON_TC_BIT |
+		NI_GPCT_LOADING_ON_GATE_BIT | NI_GPCT_LOAD_B_SELECT_BIT;
+
+	mode_reg_mask = mode_reg_direct_mask | Gi_Reload_Source_Switching_Bit;
+	mode_reg_values = mode & mode_reg_direct_mask;
+	switch (mode & NI_GPCT_RELOAD_SOURCE_MASK) {
+	case NI_GPCT_RELOAD_SOURCE_FIXED_BITS:
+		break;
+	case NI_GPCT_RELOAD_SOURCE_SWITCHING_BITS:
+		mode_reg_values |= Gi_Reload_Source_Switching_Bit;
+		break;
+	case NI_GPCT_RELOAD_SOURCE_GATE_SELECT_BITS:
+		input_select_bits |= Gi_Gate_Select_Load_Source_Bit;
+		mode_reg_mask |= Gi_Gating_Mode_Mask;
+		mode_reg_values |= Gi_Level_Gating_Bits;
+		break;
+	default:
+		break;
+	}
+	ni_tio_set_bits(counter, NITIO_Gi_Mode_Reg(counter->counter_index),
+		mode_reg_mask, mode_reg_values);
+
+	if (ni_tio_counting_mode_registers_present(counter_dev)) {
+		unsigned counting_mode_bits = 0;
+		counting_mode_bits |=
+			(mode >> NI_GPCT_COUNTING_MODE_SHIFT) &
+			Gi_Counting_Mode_Mask;
+		counting_mode_bits |=
+			((mode >> NI_GPCT_INDEX_PHASE_BITSHIFT) <<
+			Gi_Index_Phase_Bitshift) & Gi_Index_Phase_Mask;
+		if (mode & NI_GPCT_INDEX_ENABLE_BIT) {
+			counting_mode_bits |= Gi_Index_Mode_Bit;
+		}
+		ni_tio_set_bits(counter,
+			NITIO_Gi_Counting_Mode_Reg(counter->counter_index),
+			Gi_Counting_Mode_Mask | Gi_Index_Phase_Mask |
+			Gi_Index_Mode_Bit, counting_mode_bits);
+		ni_tio_set_sync_mode(counter, 0);
+	}
+
+	ni_tio_set_bits(counter, NITIO_Gi_Command_Reg(counter->counter_index),
+		Gi_Up_Down_Mask,
+		(mode >> NI_GPCT_COUNTING_DIRECTION_SHIFT) << Gi_Up_Down_Shift);
+
+	if (mode & NI_GPCT_OR_GATE_BIT) {
+		input_select_bits |= Gi_Or_Gate_Bit;
+	}
+	if (mode & NI_GPCT_INVERT_OUTPUT_BIT) {
+		input_select_bits |= Gi_Output_Polarity_Bit;
+	}
+	ni_tio_set_bits(counter,
+		NITIO_Gi_Input_Select_Reg(counter->counter_index),
+		Gi_Gate_Select_Load_Source_Bit | Gi_Or_Gate_Bit |
+		Gi_Output_Polarity_Bit, input_select_bits);
+
+	return 0;
+}
+
+static int ni_tio_arm(struct ni_gpct *counter, int arm, unsigned int start_trigger)
+{
+	struct ni_gpct_device *counter_dev = counter->counter_dev;
+
+	unsigned int command_transient_bits = 0;
+
+	if (arm) {
+		switch (start_trigger) {
+		case NI_GPCT_ARM_IMMEDIATE:
+			command_transient_bits |= Gi_Arm_Bit;
+			break;
+		case NI_GPCT_ARM_PAIRED_IMMEDIATE:
+			command_transient_bits |= Gi_Arm_Bit | Gi_Arm_Copy_Bit;
+			break;
+		default:
+			break;
+		}
+		if (ni_tio_counting_mode_registers_present(counter_dev)) {
+			unsigned counting_mode_bits = 0;
+
+			switch (start_trigger) {
+			case NI_GPCT_ARM_IMMEDIATE:
+			case NI_GPCT_ARM_PAIRED_IMMEDIATE:
+				break;
+			default:
+				if (start_trigger & NI_GPCT_ARM_UNKNOWN) {
+					/* Pass through the least
+					   significant bits so we can
+					   figure out which hardware arm
+					   select was used later. */
+					unsigned hw_arm_select_bits =
+						(start_trigger <<
+						Gi_HW_Arm_Select_Shift) &
+						Gi_HW_Arm_Select_Mask
+						(counter_dev->variant);
+
+					counting_mode_bits |=
+						Gi_HW_Arm_Enable_Bit |
+						hw_arm_select_bits;
+				} else {
+					return -EINVAL;
+				}
+				break;
+			}
+			ni_tio_set_bits(counter,
+				NITIO_Gi_Counting_Mode_Reg(counter->
+					counter_index),
+				Gi_HW_Arm_Select_Mask(counter_dev->
+					variant) | Gi_HW_Arm_Enable_Bit,
+				counting_mode_bits);
+		}
+	} else {
+		command_transient_bits |= Gi_Disarm_Bit;
+	}
+	ni_tio_set_bits_transient(counter,
+		NITIO_Gi_Command_Reg(counter->counter_index), 0, 0,
+		command_transient_bits);
+	return 0;
+}
+
+static unsigned int ni_660x_source_select_bits(lsampl_t clock_source)
+{
+	unsigned int ni_660x_clock;
+	unsigned int i;
+	const unsigned int clock_select_bits =
+		clock_source & NI_GPCT_CLOCK_SRC_SELECT_MASK;
+
+	switch (clock_select_bits) {
+	case NI_GPCT_TIMEBASE_1_CLOCK_SRC_BITS:
+		ni_660x_clock = NI_660x_Timebase_1_Clock;
+		break;
+	case NI_GPCT_TIMEBASE_2_CLOCK_SRC_BITS:
+		ni_660x_clock = NI_660x_Timebase_2_Clock;
+		break;
+	case NI_GPCT_TIMEBASE_3_CLOCK_SRC_BITS:
+		ni_660x_clock = NI_660x_Timebase_3_Clock;
+		break;
+	case NI_GPCT_LOGIC_LOW_CLOCK_SRC_BITS:
+		ni_660x_clock = NI_660x_Logic_Low_Clock;
+		break;
+	case NI_GPCT_SOURCE_PIN_i_CLOCK_SRC_BITS:
+		ni_660x_clock = NI_660x_Source_Pin_i_Clock;
+		break;
+	case NI_GPCT_NEXT_GATE_CLOCK_SRC_BITS:
+		ni_660x_clock = NI_660x_Next_Gate_Clock;
+		break;
+	case NI_GPCT_NEXT_TC_CLOCK_SRC_BITS:
+		ni_660x_clock = NI_660x_Next_TC_Clock;
+		break;
+	default:
+		for (i = 0; i <= ni_660x_max_rtsi_channel; ++i) {
+			if (clock_select_bits == NI_GPCT_RTSI_CLOCK_SRC_BITS(i)) {
+				ni_660x_clock = NI_660x_RTSI_Clock(i);
+				break;
+			}
+		}
+		if (i <= ni_660x_max_rtsi_channel)
+			break;
+		for (i = 0; i <= ni_660x_max_source_pin; ++i) {
+			if (clock_select_bits ==
+				NI_GPCT_SOURCE_PIN_CLOCK_SRC_BITS(i)) {
+				ni_660x_clock = NI_660x_Source_Pin_Clock(i);
+				break;
+			}
+		}
+		if (i <= ni_660x_max_source_pin)
+			break;
+		ni_660x_clock = 0;
+		BUG();
+		break;
+	}
+	return Gi_Source_Select_Bits(ni_660x_clock);
+}
+
+static unsigned int ni_m_series_source_select_bits(lsampl_t clock_source)
+{
+	unsigned int ni_m_series_clock;
+	unsigned int i;
+	const unsigned int clock_select_bits =
+		clock_source & NI_GPCT_CLOCK_SRC_SELECT_MASK;
+	switch (clock_select_bits) {
+	case NI_GPCT_TIMEBASE_1_CLOCK_SRC_BITS:
+		ni_m_series_clock = NI_M_Series_Timebase_1_Clock;
+		break;
+	case NI_GPCT_TIMEBASE_2_CLOCK_SRC_BITS:
+		ni_m_series_clock = NI_M_Series_Timebase_2_Clock;
+		break;
+	case NI_GPCT_TIMEBASE_3_CLOCK_SRC_BITS:
+		ni_m_series_clock = NI_M_Series_Timebase_3_Clock;
+		break;
+	case NI_GPCT_LOGIC_LOW_CLOCK_SRC_BITS:
+		ni_m_series_clock = NI_M_Series_Logic_Low_Clock;
+		break;
+	case NI_GPCT_NEXT_GATE_CLOCK_SRC_BITS:
+		ni_m_series_clock = NI_M_Series_Next_Gate_Clock;
+		break;
+	case NI_GPCT_NEXT_TC_CLOCK_SRC_BITS:
+		ni_m_series_clock = NI_M_Series_Next_TC_Clock;
+		break;
+	case NI_GPCT_PXI10_CLOCK_SRC_BITS:
+		ni_m_series_clock = NI_M_Series_PXI10_Clock;
+		break;
+	case NI_GPCT_PXI_STAR_TRIGGER_CLOCK_SRC_BITS:
+		ni_m_series_clock = NI_M_Series_PXI_Star_Trigger_Clock;
+		break;
+	case NI_GPCT_ANALOG_TRIGGER_OUT_CLOCK_SRC_BITS:
+		ni_m_series_clock = NI_M_Series_Analog_Trigger_Out_Clock;
+		break;
+	default:
+		for (i = 0; i <= ni_m_series_max_rtsi_channel; ++i) {
+			if (clock_select_bits == NI_GPCT_RTSI_CLOCK_SRC_BITS(i)) {
+				ni_m_series_clock = NI_M_Series_RTSI_Clock(i);
+				break;
+			}
+		}
+		if (i <= ni_m_series_max_rtsi_channel)
+			break;
+		for (i = 0; i <= ni_m_series_max_pfi_channel; ++i) {
+			if (clock_select_bits == NI_GPCT_PFI_CLOCK_SRC_BITS(i)) {
+				ni_m_series_clock = NI_M_Series_PFI_Clock(i);
+				break;
+			}
+		}
+		if (i <= ni_m_series_max_pfi_channel)
+			break;
+		__a4l_err("invalid clock source 0x%lx\n",
+			  (unsigned long)clock_source);
+		ni_m_series_clock = 0;
+		BUG();
+		break;
+	}
+	return Gi_Source_Select_Bits(ni_m_series_clock);
+}
+
+static void ni_tio_set_source_subselect(struct ni_gpct *counter,
+					lsampl_t clock_source)
+{
+	struct ni_gpct_device *counter_dev = counter->counter_dev;
+	const unsigned second_gate_reg =
+		NITIO_Gi_Second_Gate_Reg(counter->counter_index);
+
+	if (counter_dev->variant != ni_gpct_variant_m_series)
+		return;
+	switch (clock_source & NI_GPCT_CLOCK_SRC_SELECT_MASK) {
+		/* Gi_Source_Subselect is zero */
+	case NI_GPCT_NEXT_GATE_CLOCK_SRC_BITS:
+	case NI_GPCT_TIMEBASE_3_CLOCK_SRC_BITS:
+		counter_dev->regs[second_gate_reg] &= ~Gi_Source_Subselect_Bit;
+		break;
+		/* Gi_Source_Subselect is one */
+	case NI_GPCT_ANALOG_TRIGGER_OUT_CLOCK_SRC_BITS:
+	case NI_GPCT_PXI_STAR_TRIGGER_CLOCK_SRC_BITS:
+		counter_dev->regs[second_gate_reg] |= Gi_Source_Subselect_Bit;
+		break;
+		/* Gi_Source_Subselect doesn't matter */
+	default:
+		return;
+		break;
+	}
+	write_register(counter, counter_dev->regs[second_gate_reg],
+		second_gate_reg);
+}
+
+static int ni_tio_set_clock_src(struct ni_gpct *counter,
+				lsampl_t clock_source, lsampl_t period_ns)
+{
+	struct ni_gpct_device *counter_dev = counter->counter_dev;
+	unsigned input_select_bits = 0;
+	static const uint64_t pico_per_nano = 1000;
+
+	/* FIXME: validate clock source */
+	switch (counter_dev->variant) {
+	case ni_gpct_variant_660x:
+		input_select_bits |= ni_660x_source_select_bits(clock_source);
+		break;
+	case ni_gpct_variant_e_series:
+	case ni_gpct_variant_m_series:
+		input_select_bits |=
+			ni_m_series_source_select_bits(clock_source);
+		break;
+	default:
+		BUG();
+		break;
+	}
+	if (clock_source & NI_GPCT_INVERT_CLOCK_SRC_BIT)
+		input_select_bits |= Gi_Source_Polarity_Bit;
+	ni_tio_set_bits(counter,
+		NITIO_Gi_Input_Select_Reg(counter->counter_index),
+		Gi_Source_Select_Mask | Gi_Source_Polarity_Bit,
+		input_select_bits);
+	ni_tio_set_source_subselect(counter, clock_source);
+	if (ni_tio_counting_mode_registers_present(counter_dev)) {
+		const unsigned prescaling_mode =
+			clock_source & NI_GPCT_PRESCALE_MODE_CLOCK_SRC_MASK;
+		unsigned counting_mode_bits = 0;
+
+		switch (prescaling_mode) {
+		case NI_GPCT_NO_PRESCALE_CLOCK_SRC_BITS:
+			break;
+		case NI_GPCT_PRESCALE_X2_CLOCK_SRC_BITS:
+			counting_mode_bits |=
+				Gi_Prescale_X2_Bit(counter_dev->variant);
+			break;
+		case NI_GPCT_PRESCALE_X8_CLOCK_SRC_BITS:
+			counting_mode_bits |=
+				Gi_Prescale_X8_Bit(counter_dev->variant);
+			break;
+		default:
+			return -EINVAL;
+			break;
+		}
+		ni_tio_set_bits(counter,
+			NITIO_Gi_Counting_Mode_Reg(counter->counter_index),
+			Gi_Prescale_X2_Bit(counter_dev->
+				variant) | Gi_Prescale_X8_Bit(counter_dev->
+				variant), counting_mode_bits);
+	}
+	counter->clock_period_ps = pico_per_nano * period_ns;
+	ni_tio_set_sync_mode(counter, 0);
+	return 0;
+}
+
+static unsigned int ni_tio_clock_src_modifiers(const struct ni_gpct *counter)
+{
+	struct ni_gpct_device *counter_dev = counter->counter_dev;
+	const unsigned counting_mode_bits = ni_tio_get_soft_copy(counter,
+		NITIO_Gi_Counting_Mode_Reg(counter->counter_index));
+	unsigned int bits = 0;
+
+	if (ni_tio_get_soft_copy(counter,
+			NITIO_Gi_Input_Select_Reg(counter->
+				counter_index)) & Gi_Source_Polarity_Bit)
+		bits |= NI_GPCT_INVERT_CLOCK_SRC_BIT;
+	if (counting_mode_bits & Gi_Prescale_X2_Bit(counter_dev->variant))
+		bits |= NI_GPCT_PRESCALE_X2_CLOCK_SRC_BITS;
+	if (counting_mode_bits & Gi_Prescale_X8_Bit(counter_dev->variant))
+		bits |= NI_GPCT_PRESCALE_X8_CLOCK_SRC_BITS;
+	return bits;
+}
+
+static unsigned int ni_m_series_clock_src_select(const struct ni_gpct *counter)
+{
+	struct ni_gpct_device *counter_dev = counter->counter_dev;
+	const unsigned int second_gate_reg =
+		NITIO_Gi_Second_Gate_Reg(counter->counter_index);
+	unsigned int i, clock_source = 0;
+
+	const unsigned int input_select = (ni_tio_get_soft_copy(counter,
+			NITIO_Gi_Input_Select_Reg(counter->
+				counter_index)) & Gi_Source_Select_Mask) >>
+		Gi_Source_Select_Shift;
+
+	switch (input_select) {
+	case NI_M_Series_Timebase_1_Clock:
+		clock_source = NI_GPCT_TIMEBASE_1_CLOCK_SRC_BITS;
+		break;
+	case NI_M_Series_Timebase_2_Clock:
+		clock_source = NI_GPCT_TIMEBASE_2_CLOCK_SRC_BITS;
+		break;
+	case NI_M_Series_Timebase_3_Clock:
+		if (counter_dev->
+			regs[second_gate_reg] & Gi_Source_Subselect_Bit)
+			clock_source =
+				NI_GPCT_ANALOG_TRIGGER_OUT_CLOCK_SRC_BITS;
+		else
+			clock_source = NI_GPCT_TIMEBASE_3_CLOCK_SRC_BITS;
+		break;
+	case NI_M_Series_Logic_Low_Clock:
+		clock_source = NI_GPCT_LOGIC_LOW_CLOCK_SRC_BITS;
+		break;
+	case NI_M_Series_Next_Gate_Clock:
+		if (counter_dev->
+			regs[second_gate_reg] & Gi_Source_Subselect_Bit)
+			clock_source = NI_GPCT_PXI_STAR_TRIGGER_CLOCK_SRC_BITS;
+		else
+			clock_source = NI_GPCT_NEXT_GATE_CLOCK_SRC_BITS;
+		break;
+	case NI_M_Series_PXI10_Clock:
+		clock_source = NI_GPCT_PXI10_CLOCK_SRC_BITS;
+		break;
+	case NI_M_Series_Next_TC_Clock:
+		clock_source = NI_GPCT_NEXT_TC_CLOCK_SRC_BITS;
+		break;
+	default:
+		for (i = 0; i <= ni_m_series_max_rtsi_channel; ++i) {
+			if (input_select == NI_M_Series_RTSI_Clock(i)) {
+				clock_source = NI_GPCT_RTSI_CLOCK_SRC_BITS(i);
+				break;
+			}
+		}
+		if (i <= ni_m_series_max_rtsi_channel)
+			break;
+		for (i = 0; i <= ni_m_series_max_pfi_channel; ++i) {
+			if (input_select == NI_M_Series_PFI_Clock(i)) {
+				clock_source = NI_GPCT_PFI_CLOCK_SRC_BITS(i);
+				break;
+			}
+		}
+		if (i <= ni_m_series_max_pfi_channel)
+			break;
+		BUG();
+		break;
+	}
+	clock_source |= ni_tio_clock_src_modifiers(counter);
+	return clock_source;
+}
+
+static unsigned int ni_660x_clock_src_select(const struct ni_gpct *counter)
+{
+	unsigned int i, clock_source = 0;
+	const unsigned input_select = (ni_tio_get_soft_copy(counter,
+			NITIO_Gi_Input_Select_Reg(counter->
+				counter_index)) & Gi_Source_Select_Mask) >>
+		Gi_Source_Select_Shift;
+
+	switch (input_select) {
+	case NI_660x_Timebase_1_Clock:
+		clock_source = NI_GPCT_TIMEBASE_1_CLOCK_SRC_BITS;
+		break;
+	case NI_660x_Timebase_2_Clock:
+		clock_source = NI_GPCT_TIMEBASE_2_CLOCK_SRC_BITS;
+		break;
+	case NI_660x_Timebase_3_Clock:
+		clock_source = NI_GPCT_TIMEBASE_3_CLOCK_SRC_BITS;
+		break;
+	case NI_660x_Logic_Low_Clock:
+		clock_source = NI_GPCT_LOGIC_LOW_CLOCK_SRC_BITS;
+		break;
+	case NI_660x_Source_Pin_i_Clock:
+		clock_source = NI_GPCT_SOURCE_PIN_i_CLOCK_SRC_BITS;
+		break;
+	case NI_660x_Next_Gate_Clock:
+		clock_source = NI_GPCT_NEXT_GATE_CLOCK_SRC_BITS;
+		break;
+	case NI_660x_Next_TC_Clock:
+		clock_source = NI_GPCT_NEXT_TC_CLOCK_SRC_BITS;
+		break;
+	default:
+		for (i = 0; i <= ni_660x_max_rtsi_channel; ++i) {
+			if (input_select == NI_660x_RTSI_Clock(i)) {
+				clock_source = NI_GPCT_RTSI_CLOCK_SRC_BITS(i);
+				break;
+			}
+		}
+		if (i <= ni_660x_max_rtsi_channel)
+			break;
+		for (i = 0; i <= ni_660x_max_source_pin; ++i) {
+			if (input_select == NI_660x_Source_Pin_Clock(i)) {
+				clock_source =
+					NI_GPCT_SOURCE_PIN_CLOCK_SRC_BITS(i);
+				break;
+			}
+		}
+		if (i <= ni_660x_max_source_pin)
+			break;
+		BUG();
+		break;
+	}
+	clock_source |= ni_tio_clock_src_modifiers(counter);
+	return clock_source;
+}
+
+static unsigned int ni_tio_generic_clock_src_select(const struct ni_gpct *counter)
+{
+	switch (counter->counter_dev->variant) {
+	case ni_gpct_variant_e_series:
+	case ni_gpct_variant_m_series:
+		return ni_m_series_clock_src_select(counter);
+		break;
+	case ni_gpct_variant_660x:
+		return ni_660x_clock_src_select(counter);
+		break;
+	default:
+		BUG();
+		break;
+	}
+	return 0;
+}
+
+static uint64_t ni_tio_clock_period_ps(const struct ni_gpct *counter,
+				       unsigned int generic_clock_source)
+{
+	uint64_t clock_period_ps;
+
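+	/* Periods of the fixed timebases, in picoseconds: timebase 1 is
+	   50 ns (20 MHz), timebase 2 is 10 us (100 kHz), timebase 3 is
+	   12.5 ns (80 MHz) and the PXI10 clock is 100 ns (10 MHz). */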
+	switch (generic_clock_source & NI_GPCT_CLOCK_SRC_SELECT_MASK) {
+	case NI_GPCT_TIMEBASE_1_CLOCK_SRC_BITS:
+		clock_period_ps = 50000;
+		break;
+	case NI_GPCT_TIMEBASE_2_CLOCK_SRC_BITS:
+		clock_period_ps = 10000000;
+		break;
+	case NI_GPCT_TIMEBASE_3_CLOCK_SRC_BITS:
+		clock_period_ps = 12500;
+		break;
+	case NI_GPCT_PXI10_CLOCK_SRC_BITS:
+		clock_period_ps = 100000;
+		break;
+	default:
+		/* Clock period is specified by user with prescaling
+		   already taken into account. */
+		return counter->clock_period_ps;
+		break;
+	}
+
+	switch (generic_clock_source & NI_GPCT_PRESCALE_MODE_CLOCK_SRC_MASK) {
+	case NI_GPCT_NO_PRESCALE_CLOCK_SRC_BITS:
+		break;
+	case NI_GPCT_PRESCALE_X2_CLOCK_SRC_BITS:
+		clock_period_ps *= 2;
+		break;
+	case NI_GPCT_PRESCALE_X8_CLOCK_SRC_BITS:
+		clock_period_ps *= 8;
+		break;
+	default:
+		BUG();
+		break;
+	}
+	return clock_period_ps;
+}
+
+static void ni_tio_get_clock_src(struct ni_gpct *counter,
+				 unsigned int * clock_source,
+				 unsigned int * period_ns)
+{
+	static const unsigned int pico_per_nano = 1000;
+	uint64_t temp64;
+
+	*clock_source = ni_tio_generic_clock_src_select(counter);
+	temp64 = ni_tio_clock_period_ps(counter, *clock_source);
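+	/* do_div() divides the 64-bit value in place by a 32-bit divisor
+	   (picoseconds to nanoseconds here); it is usable on 32-bit
+	   kernels where a plain 64-bit division is not. */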
+	do_div(temp64, pico_per_nano);
+	*period_ns = temp64;
+}
+
+static void ni_tio_set_first_gate_modifiers(struct ni_gpct *counter,
+					    lsampl_t gate_source)
+{
+	const unsigned int mode_mask = Gi_Gate_Polarity_Bit | Gi_Gating_Mode_Mask;
+	unsigned int mode_values = 0;
+
+	if (gate_source & CR_INVERT) {
+		mode_values |= Gi_Gate_Polarity_Bit;
+	}
+	if (gate_source & CR_EDGE) {
+		mode_values |= Gi_Rising_Edge_Gating_Bits;
+	} else {
+		mode_values |= Gi_Level_Gating_Bits;
+	}
+	ni_tio_set_bits(counter, NITIO_Gi_Mode_Reg(counter->counter_index),
+		mode_mask, mode_values);
+}
+
+static int ni_660x_set_first_gate(struct ni_gpct *counter, lsampl_t gate_source)
+{
+	const unsigned int selected_gate = CR_CHAN(gate_source);
+	/* Bits of selected_gate that may be meaningful to
+	/* Bits of selected_gate that may be meaningful to
+	   the input select register */
+	unsigned ni_660x_gate_select;
+	unsigned i;
+
+	switch (selected_gate) {
+	case NI_GPCT_NEXT_SOURCE_GATE_SELECT:
+		ni_660x_gate_select = NI_660x_Next_SRC_Gate_Select;
+		break;
+	case NI_GPCT_NEXT_OUT_GATE_SELECT:
+	case NI_GPCT_LOGIC_LOW_GATE_SELECT:
+	case NI_GPCT_SOURCE_PIN_i_GATE_SELECT:
+	case NI_GPCT_GATE_PIN_i_GATE_SELECT:
+		ni_660x_gate_select = selected_gate & selected_gate_mask;
+		break;
+	default:
+		for (i = 0; i <= ni_660x_max_rtsi_channel; ++i) {
+			if (selected_gate == NI_GPCT_RTSI_GATE_SELECT(i)) {
+				ni_660x_gate_select =
+					selected_gate & selected_gate_mask;
+				break;
+			}
+		}
+		if (i <= ni_660x_max_rtsi_channel)
+			break;
+		for (i = 0; i <= ni_660x_max_gate_pin; ++i) {
+			if (selected_gate == NI_GPCT_GATE_PIN_GATE_SELECT(i)) {
+				ni_660x_gate_select =
+					selected_gate & selected_gate_mask;
+				break;
+			}
+		}
+		if (i <= ni_660x_max_gate_pin)
+			break;
+		return -EINVAL;
+		break;
+	}
+	ni_tio_set_bits(counter,
+		NITIO_Gi_Input_Select_Reg(counter->counter_index),
+		Gi_Gate_Select_Mask, Gi_Gate_Select_Bits(ni_660x_gate_select));
+	return 0;
+}
+
+static int ni_m_series_set_first_gate(struct ni_gpct *counter,
+				      lsampl_t gate_source)
+{
+	const unsigned int selected_gate = CR_CHAN(gate_source);
+	/* Bits of selected_gate that may be meaningful to the input select register */
+	const unsigned int selected_gate_mask = 0x1f;
+	unsigned int i, ni_m_series_gate_select;
+
+	switch (selected_gate) {
+	case NI_GPCT_TIMESTAMP_MUX_GATE_SELECT:
+	case NI_GPCT_AI_START2_GATE_SELECT:
+	case NI_GPCT_PXI_STAR_TRIGGER_GATE_SELECT:
+	case NI_GPCT_NEXT_OUT_GATE_SELECT:
+	case NI_GPCT_AI_START1_GATE_SELECT:
+	case NI_GPCT_NEXT_SOURCE_GATE_SELECT:
+	case NI_GPCT_ANALOG_TRIGGER_OUT_GATE_SELECT:
+	case NI_GPCT_LOGIC_LOW_GATE_SELECT:
+		ni_m_series_gate_select = selected_gate & selected_gate_mask;
+		break;
+	default:
+		for (i = 0; i <= ni_m_series_max_rtsi_channel; ++i) {
+			if (selected_gate == NI_GPCT_RTSI_GATE_SELECT(i)) {
+				ni_m_series_gate_select =
+					selected_gate & selected_gate_mask;
+				break;
+			}
+		}
+		if (i <= ni_m_series_max_rtsi_channel)
+			break;
+		for (i = 0; i <= ni_m_series_max_pfi_channel; ++i) {
+			if (selected_gate == NI_GPCT_PFI_GATE_SELECT(i)) {
+				ni_m_series_gate_select =
+					selected_gate & selected_gate_mask;
+				break;
+			}
+		}
+		if (i <= ni_m_series_max_pfi_channel)
+			break;
+		return -EINVAL;
+		break;
+	}
+	ni_tio_set_bits(counter,
+		NITIO_Gi_Input_Select_Reg(counter->counter_index),
+		Gi_Gate_Select_Mask,
+		Gi_Gate_Select_Bits(ni_m_series_gate_select));
+	return 0;
+}
+
+static int ni_660x_set_second_gate(struct ni_gpct *counter,
+				   lsampl_t gate_source)
+{
+	struct ni_gpct_device *counter_dev = counter->counter_dev;
+	const unsigned int second_gate_reg =
+		NITIO_Gi_Second_Gate_Reg(counter->counter_index);
+	const unsigned int selected_second_gate = CR_CHAN(gate_source);
+	/* Bits of second_gate that may be meaningful to the second gate register */
+	static const unsigned int selected_second_gate_mask = 0x1f;
+	unsigned int i, ni_660x_second_gate_select;
+
+	switch (selected_second_gate) {
+	case NI_GPCT_SOURCE_PIN_i_GATE_SELECT:
+	case NI_GPCT_UP_DOWN_PIN_i_GATE_SELECT:
+	case NI_GPCT_SELECTED_GATE_GATE_SELECT:
+	case NI_GPCT_NEXT_OUT_GATE_SELECT:
+	case NI_GPCT_LOGIC_LOW_GATE_SELECT:
+		ni_660x_second_gate_select =
+			selected_second_gate & selected_second_gate_mask;
+		break;
+	case NI_GPCT_NEXT_SOURCE_GATE_SELECT:
+		ni_660x_second_gate_select =
+			NI_660x_Next_SRC_Second_Gate_Select;
+		break;
+	default:
+		for (i = 0; i <= ni_660x_max_rtsi_channel; ++i) {
+			if (selected_second_gate == NI_GPCT_RTSI_GATE_SELECT(i)) {
+				ni_660x_second_gate_select =
+					selected_second_gate &
+					selected_second_gate_mask;
+				break;
+			}
+		}
+		if (i <= ni_660x_max_rtsi_channel)
+			break;
+		for (i = 0; i <= ni_660x_max_up_down_pin; ++i) {
+			if (selected_second_gate ==
+				NI_GPCT_UP_DOWN_PIN_GATE_SELECT(i)) {
+				ni_660x_second_gate_select =
+					selected_second_gate &
+					selected_second_gate_mask;
+				break;
+			}
+		}
+		if (i <= ni_660x_max_up_down_pin)
+			break;
+		return -EINVAL;
+		break;
+	}
+	counter_dev->regs[second_gate_reg] |= Gi_Second_Gate_Mode_Bit;
+	counter_dev->regs[second_gate_reg] &= ~Gi_Second_Gate_Select_Mask;
+	counter_dev->regs[second_gate_reg] |=
+		Gi_Second_Gate_Select_Bits(ni_660x_second_gate_select);
+	write_register(counter, counter_dev->regs[second_gate_reg],
+		second_gate_reg);
+	return 0;
+}
+
+static int ni_m_series_set_second_gate(struct ni_gpct *counter,
+				       lsampl_t gate_source)
+{
+	struct ni_gpct_device *counter_dev = counter->counter_dev;
+	const unsigned int second_gate_reg =
+		NITIO_Gi_Second_Gate_Reg(counter->counter_index);
+	const unsigned int selected_second_gate = CR_CHAN(gate_source);
+	/* Bits of second_gate that may be meaningful to the second gate register */
+	static const unsigned int selected_second_gate_mask = 0x1f;
+	unsigned int ni_m_series_second_gate_select;
+
+	/* FIXME: We don't know what the m-series second gate codes
+	   are, so we'll just pass the bits through for now. */
+	switch (selected_second_gate) {
+	default:
+		ni_m_series_second_gate_select =
+			selected_second_gate & selected_second_gate_mask;
+		break;
+	}
+	counter_dev->regs[second_gate_reg] |= Gi_Second_Gate_Mode_Bit;
+	counter_dev->regs[second_gate_reg] &= ~Gi_Second_Gate_Select_Mask;
+	counter_dev->regs[second_gate_reg] |=
+		Gi_Second_Gate_Select_Bits(ni_m_series_second_gate_select);
+	write_register(counter, counter_dev->regs[second_gate_reg],
+		second_gate_reg);
+	return 0;
+}
+
+static int ni_tio_set_gate_src(struct ni_gpct *counter,
+			       unsigned int gate_index, lsampl_t gate_source)
+{
+	struct ni_gpct_device *counter_dev = counter->counter_dev;
+	const unsigned int second_gate_reg =
+		NITIO_Gi_Second_Gate_Reg(counter->counter_index);
+
+	switch (gate_index) {
+	case 0:
+		if (CR_CHAN(gate_source) == NI_GPCT_DISABLED_GATE_SELECT) {
+			ni_tio_set_bits(counter,
+				NITIO_Gi_Mode_Reg(counter->counter_index),
+				Gi_Gating_Mode_Mask, Gi_Gating_Disabled_Bits);
+			return 0;
+		}
+		ni_tio_set_first_gate_modifiers(counter, gate_source);
+		switch (counter_dev->variant) {
+		case ni_gpct_variant_e_series:
+		case ni_gpct_variant_m_series:
+			return ni_m_series_set_first_gate(counter, gate_source);
+			break;
+		case ni_gpct_variant_660x:
+			return ni_660x_set_first_gate(counter, gate_source);
+			break;
+		default:
+			BUG();
+			break;
+		}
+		break;
+	case 1:
+		if (ni_tio_second_gate_registers_present(counter_dev) == 0)
+			return -EINVAL;
+		if (CR_CHAN(gate_source) == NI_GPCT_DISABLED_GATE_SELECT) {
+			counter_dev->regs[second_gate_reg] &=
+				~Gi_Second_Gate_Mode_Bit;
+			write_register(counter,
+				counter_dev->regs[second_gate_reg],
+				second_gate_reg);
+			return 0;
+		}
+		if (gate_source & CR_INVERT) {
+			counter_dev->regs[second_gate_reg] |=
+				Gi_Second_Gate_Polarity_Bit;
+		} else {
+			counter_dev->regs[second_gate_reg] &=
+				~Gi_Second_Gate_Polarity_Bit;
+		}
+		switch (counter_dev->variant) {
+		case ni_gpct_variant_m_series:
+			return ni_m_series_set_second_gate(counter,
+				gate_source);
+			break;
+		case ni_gpct_variant_660x:
+			return ni_660x_set_second_gate(counter, gate_source);
+			break;
+		default:
+			BUG();
+			break;
+		}
+		break;
+	default:
+		return -EINVAL;
+		break;
+	}
+	return 0;
+}
+
+static int ni_tio_set_other_src(struct ni_gpct *counter,
+				unsigned int index, unsigned int source)
+{
+	struct ni_gpct_device *counter_dev = counter->counter_dev;
+
+	if (counter_dev->variant == ni_gpct_variant_m_series) {
+		unsigned int abz_reg, shift, mask;
+
+		abz_reg = NITIO_Gi_ABZ_Reg(counter->counter_index);
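+		/* Each encoder input has a 5-bit select field in the ABZ
+		   register: A at bits 10..14, B at bits 5..9, Z at bits
+		   0..4; a value of 0x1f disables the input. */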
+		switch (index) {
+		case NI_GPCT_SOURCE_ENCODER_A:
+			shift = 10;
+			break;
+		case NI_GPCT_SOURCE_ENCODER_B:
+			shift = 5;
+			break;
+		case NI_GPCT_SOURCE_ENCODER_Z:
+			shift = 0;
+			break;
+		default:
+			return -EINVAL;
+			break;
+		}
+		mask = 0x1f << shift;
+		if (source > 0x1f) {
+			/* Disable gate */
+			source = 0x1f;
+		}
+		counter_dev->regs[abz_reg] &= ~mask;
+		counter_dev->regs[abz_reg] |= (source << shift) & mask;
+		write_register(counter, counter_dev->regs[abz_reg], abz_reg);
+		return 0;
+	}
+	return -EINVAL;
+}
+
+static unsigned int ni_660x_first_gate_to_generic_gate_source(unsigned int ni_660x_gate_select)
+{
+	unsigned int i;
+
+	switch (ni_660x_gate_select) {
+	case NI_660x_Source_Pin_i_Gate_Select:
+		return NI_GPCT_SOURCE_PIN_i_GATE_SELECT;
+		break;
+	case NI_660x_Gate_Pin_i_Gate_Select:
+		return NI_GPCT_GATE_PIN_i_GATE_SELECT;
+		break;
+	case NI_660x_Next_SRC_Gate_Select:
+		return NI_GPCT_NEXT_SOURCE_GATE_SELECT;
+		break;
+	case NI_660x_Next_Out_Gate_Select:
+		return NI_GPCT_NEXT_OUT_GATE_SELECT;
+		break;
+	case NI_660x_Logic_Low_Gate_Select:
+		return NI_GPCT_LOGIC_LOW_GATE_SELECT;
+		break;
+	default:
+		for (i = 0; i <= ni_660x_max_rtsi_channel; ++i) {
+			if (ni_660x_gate_select == NI_660x_RTSI_Gate_Select(i)) {
+				return NI_GPCT_RTSI_GATE_SELECT(i);
+				break;
+			}
+		}
+		if (i <= ni_660x_max_rtsi_channel)
+			break;
+		for (i = 0; i <= ni_660x_max_gate_pin; ++i) {
+			if (ni_660x_gate_select ==
+				NI_660x_Gate_Pin_Gate_Select(i)) {
+				return NI_GPCT_GATE_PIN_GATE_SELECT(i);
+				break;
+			}
+		}
+		if (i <= ni_660x_max_gate_pin)
+			break;
+		BUG();
+		break;
+	}
+	return 0;
+}
+
+static unsigned int ni_m_series_first_gate_to_generic_gate_source(unsigned int
+	ni_m_series_gate_select)
+{
+	unsigned int i;
+
+	switch (ni_m_series_gate_select) {
+	case NI_M_Series_Timestamp_Mux_Gate_Select:
+		return NI_GPCT_TIMESTAMP_MUX_GATE_SELECT;
+		break;
+	case NI_M_Series_AI_START2_Gate_Select:
+		return NI_GPCT_AI_START2_GATE_SELECT;
+		break;
+	case NI_M_Series_PXI_Star_Trigger_Gate_Select:
+		return NI_GPCT_PXI_STAR_TRIGGER_GATE_SELECT;
+		break;
+	case NI_M_Series_Next_Out_Gate_Select:
+		return NI_GPCT_NEXT_OUT_GATE_SELECT;
+		break;
+	case NI_M_Series_AI_START1_Gate_Select:
+		return NI_GPCT_AI_START1_GATE_SELECT;
+		break;
+	case NI_M_Series_Next_SRC_Gate_Select:
+		return NI_GPCT_NEXT_SOURCE_GATE_SELECT;
+		break;
+	case NI_M_Series_Analog_Trigger_Out_Gate_Select:
+		return NI_GPCT_ANALOG_TRIGGER_OUT_GATE_SELECT;
+		break;
+	case NI_M_Series_Logic_Low_Gate_Select:
+		return NI_GPCT_LOGIC_LOW_GATE_SELECT;
+		break;
+	default:
+		for (i = 0; i <= ni_m_series_max_rtsi_channel; ++i) {
+			if (ni_m_series_gate_select ==
+				NI_M_Series_RTSI_Gate_Select(i)) {
+				return NI_GPCT_RTSI_GATE_SELECT(i);
+				break;
+			}
+		}
+		if (i <= ni_m_series_max_rtsi_channel)
+			break;
+		for (i = 0; i <= ni_m_series_max_pfi_channel; ++i) {
+			if (ni_m_series_gate_select ==
+				NI_M_Series_PFI_Gate_Select(i)) {
+				return NI_GPCT_PFI_GATE_SELECT(i);
+				break;
+			}
+		}
+		if (i <= ni_m_series_max_pfi_channel)
+			break;
+		BUG();
+		break;
+	}
+	return 0;
+}
+
+static unsigned int ni_660x_second_gate_to_generic_gate_source(unsigned int
+	ni_660x_gate_select)
+{
+	unsigned int i;
+
+	switch (ni_660x_gate_select) {
+	case NI_660x_Source_Pin_i_Second_Gate_Select:
+		return NI_GPCT_SOURCE_PIN_i_GATE_SELECT;
+		break;
+	case NI_660x_Up_Down_Pin_i_Second_Gate_Select:
+		return NI_GPCT_UP_DOWN_PIN_i_GATE_SELECT;
+		break;
+	case NI_660x_Next_SRC_Second_Gate_Select:
+		return NI_GPCT_NEXT_SOURCE_GATE_SELECT;
+		break;
+	case NI_660x_Next_Out_Second_Gate_Select:
+		return NI_GPCT_NEXT_OUT_GATE_SELECT;
+		break;
+	case NI_660x_Selected_Gate_Second_Gate_Select:
+		return NI_GPCT_SELECTED_GATE_GATE_SELECT;
+		break;
+	case NI_660x_Logic_Low_Second_Gate_Select:
+		return NI_GPCT_LOGIC_LOW_GATE_SELECT;
+		break;
+	default:
+		for (i = 0; i <= ni_660x_max_rtsi_channel; ++i) {
+			if (ni_660x_gate_select ==
+				NI_660x_RTSI_Second_Gate_Select(i)) {
+				return NI_GPCT_RTSI_GATE_SELECT(i);
+				break;
+			}
+		}
+		if (i <= ni_660x_max_rtsi_channel)
+			break;
+		for (i = 0; i <= ni_660x_max_up_down_pin; ++i) {
+			if (ni_660x_gate_select ==
+				NI_660x_Up_Down_Pin_Second_Gate_Select(i)) {
+				return NI_GPCT_UP_DOWN_PIN_GATE_SELECT(i);
+				break;
+			}
+		}
+		if (i <= ni_660x_max_up_down_pin)
+			break;
+		BUG();
+		break;
+	}
+	return 0;
+}
+
+static unsigned int ni_m_series_second_gate_to_generic_gate_source(unsigned int
+	ni_m_series_gate_select)
+{
+	/* FIXME: the second gate sources for the m series are
+	   undocumented, so we just return the raw bits for now. */
+	switch (ni_m_series_gate_select) {
+	default:
+		return ni_m_series_gate_select;
+		break;
+	}
+	return 0;
+}
+
+static int ni_tio_get_gate_src(struct ni_gpct *counter,
+			       unsigned int gate_index,
+			       unsigned int * gate_source)
+{
+	struct ni_gpct_device *counter_dev = counter->counter_dev;
+	const unsigned int mode_bits = ni_tio_get_soft_copy(counter,
+		NITIO_Gi_Mode_Reg(counter->counter_index));
+	const unsigned int second_gate_reg =
+		NITIO_Gi_Second_Gate_Reg(counter->counter_index);
+	unsigned int gate_select_bits;
+
+	switch (gate_index) {
+	case 0:
+		if ((mode_bits & Gi_Gating_Mode_Mask) ==
+			Gi_Gating_Disabled_Bits) {
+			*gate_source = NI_GPCT_DISABLED_GATE_SELECT;
+			return 0;
+		} else {
+			gate_select_bits =
+				(ni_tio_get_soft_copy(counter,
+					NITIO_Gi_Input_Select_Reg(counter->
+						counter_index)) &
+				Gi_Gate_Select_Mask) >> Gi_Gate_Select_Shift;
+		}
+		switch (counter_dev->variant) {
+		case ni_gpct_variant_e_series:
+		case ni_gpct_variant_m_series:
+			*gate_source =
+				ni_m_series_first_gate_to_generic_gate_source
+				(gate_select_bits);
+			break;
+		case ni_gpct_variant_660x:
+			*gate_source =
+				ni_660x_first_gate_to_generic_gate_source
+				(gate_select_bits);
+			break;
+		default:
+			BUG();
+			break;
+		}
+		if (mode_bits & Gi_Gate_Polarity_Bit) {
+			*gate_source |= CR_INVERT;
+		}
+		if ((mode_bits & Gi_Gating_Mode_Mask) != Gi_Level_Gating_Bits) {
+			*gate_source |= CR_EDGE;
+		}
+		break;
+	case 1:
+		if ((mode_bits & Gi_Gating_Mode_Mask) == Gi_Gating_Disabled_Bits
+			|| (counter_dev->
+				regs[second_gate_reg] & Gi_Second_Gate_Mode_Bit)
+			== 0) {
+			*gate_source = NI_GPCT_DISABLED_GATE_SELECT;
+			return 0;
+		} else {
+			gate_select_bits =
+				(counter_dev->
+				regs[second_gate_reg] &
+				Gi_Second_Gate_Select_Mask) >>
+				Gi_Second_Gate_Select_Shift;
+		}
+		switch (counter_dev->variant) {
+		case ni_gpct_variant_e_series:
+		case ni_gpct_variant_m_series:
+			*gate_source =
+				ni_m_series_second_gate_to_generic_gate_source
+				(gate_select_bits);
+			break;
+		case ni_gpct_variant_660x:
+			*gate_source =
+				ni_660x_second_gate_to_generic_gate_source
+				(gate_select_bits);
+			break;
+		default:
+			BUG();
+			break;
+		}
+		if (counter_dev->
+			regs[second_gate_reg] & Gi_Second_Gate_Polarity_Bit) {
+			*gate_source |= CR_INVERT;
+		}
+		/* Second gate can't have edge/level mode set independently */
+		if ((mode_bits & Gi_Gating_Mode_Mask) != Gi_Level_Gating_Bits) {
+			*gate_source |= CR_EDGE;
+		}
+		break;
+	default:
+		return -EINVAL;
+		break;
+	}
+	return 0;
+}
+
+int a4l_ni_tio_insn_config(struct ni_gpct *counter, struct a4l_kernel_instruction *insn)
+{
+	unsigned int *data = (unsigned int *)insn->data;
+
+	switch (data[0]) {
+	case A4L_INSN_CONFIG_SET_COUNTER_MODE:
+		return ni_tio_set_counter_mode(counter, data[1]);
+		break;
+	case A4L_INSN_CONFIG_ARM:
+		return ni_tio_arm(counter, 1, data[1]);
+		break;
+	case A4L_INSN_CONFIG_DISARM:
+		ni_tio_arm(counter, 0, 0);
+		return 0;
+		break;
+	case A4L_INSN_CONFIG_GET_COUNTER_STATUS:
+		data[1] = ni_tio_counter_status(counter);
+		data[2] = counter_status_mask;
+		return 0;
+		break;
+	case A4L_INSN_CONFIG_SET_CLOCK_SRC:
+		return ni_tio_set_clock_src(counter, data[1], data[2]);
+		break;
+	case A4L_INSN_CONFIG_GET_CLOCK_SRC:
+		ni_tio_get_clock_src(counter, &data[1], &data[2]);
+		return 0;
+		break;
+	case A4L_INSN_CONFIG_SET_GATE_SRC:
+		return ni_tio_set_gate_src(counter, data[1], data[2]);
+		break;
+	case A4L_INSN_CONFIG_GET_GATE_SRC:
+		return ni_tio_get_gate_src(counter, data[1], &data[2]);
+		break;
+	case A4L_INSN_CONFIG_SET_OTHER_SRC:
+		return ni_tio_set_other_src(counter, data[1], data[2]);
+		break;
+	case A4L_INSN_CONFIG_RESET:
+		ni_tio_reset_count_and_disarm(counter);
+		return 0;
+		break;
+	default:
+		break;
+	}
+	return -EINVAL;
+}
+
+int a4l_ni_tio_rinsn(struct ni_gpct *counter, struct a4l_kernel_instruction *insn)
+{
+	struct ni_gpct_device *counter_dev = counter->counter_dev;
+	const unsigned int channel = CR_CHAN(insn->chan_desc);
+	unsigned int first_read;
+	unsigned int second_read;
+	unsigned int correct_read;
+
+	uint32_t *data = (uint32_t *)insn->data;
+
+	if (insn->data_size != sizeof(uint32_t))
+		return -EINVAL;
+
+	switch (channel) {
+	case 0:
+		ni_tio_set_bits(counter,
+			NITIO_Gi_Command_Reg(counter->counter_index),
+			Gi_Save_Trace_Bit, 0);
+		ni_tio_set_bits(counter,
+			NITIO_Gi_Command_Reg(counter->counter_index),
+			Gi_Save_Trace_Bit, Gi_Save_Trace_Bit);
+		/* The count doesn't get latched until the next clock
+		   edge, so it is possible the count may change (once)
+		   while we are reading.  Since the read of the
+		   SW_Save_Reg isn't atomic (apparently even when it's a
+		   32 bit register according to 660x docs), we need to
+		   read twice and make sure the reading hasn't changed.
+		   If it has, a third read will be correct since the
+		   count value will definitely have latched by then. */
+		first_read =
+			read_register(counter,
+			NITIO_Gi_SW_Save_Reg(counter->counter_index));
+		second_read =
+			read_register(counter,
+			NITIO_Gi_SW_Save_Reg(counter->counter_index));
+		if (first_read != second_read)
+			correct_read =
+				read_register(counter,
+				NITIO_Gi_SW_Save_Reg(counter->counter_index));
+		else
+			correct_read = first_read;
+		data[0] = correct_read;
+		return 0;
+		break;
+	case 1:
+		data[0] = counter_dev->regs
+			[NITIO_Gi_LoadA_Reg(counter->counter_index)];
+		break;
+	case 2:
+		data[0] = counter_dev->regs
+			[NITIO_Gi_LoadB_Reg(counter->counter_index)];
+		break;
+	}
+
+	return 0;
+}
+
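+/* Return the load register (LoadA or LoadB) the counter will use for its
+   next load, as indicated by Gi_Next_Load_Source_Bit in the shared status
+   register. */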
+static unsigned int ni_tio_next_load_register(struct ni_gpct *counter)
+{
+	const unsigned int bits = read_register(counter,
+		NITIO_Gxx_Status_Reg(counter->counter_index));
+
+	if (bits & Gi_Next_Load_Source_Bit(counter->counter_index)) {
+		return NITIO_Gi_LoadB_Reg(counter->counter_index);
+	} else {
+		return NITIO_Gi_LoadA_Reg(counter->counter_index);
+	}
+}
+
+int a4l_ni_tio_winsn(struct ni_gpct *counter, struct a4l_kernel_instruction *insn)
+{
+	struct ni_gpct_device *counter_dev = counter->counter_dev;
+	const unsigned int channel = CR_CHAN(insn->chan_desc);
+	unsigned int load_reg;
+
+	uint32_t *data = (uint32_t *)insn->data;
+
+	if (insn->data_size != sizeof(uint32_t))
+		return -EINVAL;
+
+	switch (channel) {
+	case 0:
+		/* Unsafe if counter is armed.  Should probably check
+		   status and return -EBUSY if armed. */
+		/* Don't disturb load source select, just use
+		   whichever load register is already selected. */
+		load_reg = ni_tio_next_load_register(counter);
+		write_register(counter, data[0], load_reg);
+		ni_tio_set_bits_transient(counter,
+			NITIO_Gi_Command_Reg(counter->counter_index), 0, 0,
+			Gi_Load_Bit);
+		/* Restore state of load reg to whatever the user
+		   last set it to */
+		write_register(counter, counter_dev->regs[load_reg], load_reg);
+		break;
+	case 1:
+		counter_dev->regs[NITIO_Gi_LoadA_Reg(counter->counter_index)] =
+			data[0];
+		write_register(counter, data[0],
+			NITIO_Gi_LoadA_Reg(counter->counter_index));
+		break;
+	case 2:
+		counter_dev->regs[NITIO_Gi_LoadB_Reg(counter->counter_index)] =
+			data[0];
+		write_register(counter, data[0],
+			NITIO_Gi_LoadB_Reg(counter->counter_index));
+		break;
+	default:
+		return -EINVAL;
+		break;
+	}
+
+	return 0;
+}
+
+#if (defined(CONFIG_XENO_DRIVERS_ANALOGY_NI_MITE) || \
+     defined(CONFIG_XENO_DRIVERS_ANALOGY_NI_MITE_MODULE))
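+/* Everything from here to the matching #endif implements command-based
+   (DMA-driven) acquisition and is only built when the MITE driver is
+   enabled. */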
+
+static void ni_tio_configure_dma(struct ni_gpct *counter,
+				 short enable, short read_not_write)
+{
+	struct ni_gpct_device *counter_dev = counter->counter_dev;
+	unsigned int input_select_bits = 0;
+
+	if (enable) {
+		if (read_not_write) {
+			input_select_bits |= Gi_Read_Acknowledges_Irq;
+		} else {
+			input_select_bits |= Gi_Write_Acknowledges_Irq;
+		}
+	}
+	ni_tio_set_bits(counter,
+		NITIO_Gi_Input_Select_Reg(counter->counter_index),
+		Gi_Read_Acknowledges_Irq | Gi_Write_Acknowledges_Irq,
+		input_select_bits);
+	switch (counter_dev->variant) {
+	case ni_gpct_variant_e_series:
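+		/* Nothing to configure beyond the acknowledge bits set
+		   above. */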
+		break;
+	case ni_gpct_variant_m_series:
+	case ni_gpct_variant_660x:
+		{
+			unsigned gi_dma_config_bits = 0;
+
+			if (enable) {
+				gi_dma_config_bits |= Gi_DMA_Enable_Bit;
+				gi_dma_config_bits |= Gi_DMA_Int_Bit;
+			}
+			if (read_not_write == 0) {
+				gi_dma_config_bits |= Gi_DMA_Write_Bit;
+			}
+			ni_tio_set_bits(counter,
+				NITIO_Gi_DMA_Config_Reg(counter->counter_index),
+				Gi_DMA_Enable_Bit | Gi_DMA_Int_Bit |
+				Gi_DMA_Write_Bit, gi_dma_config_bits);
+		}
+		break;
+	}
+}
+
+/* TODO: a4l_ni_tio_input_inttrig is left unused because the trigger
+   callback cannot be changed at run time */
+int a4l_ni_tio_input_inttrig(struct ni_gpct *counter, lsampl_t trignum)
+{
+	unsigned long flags;
+	int retval = 0;
+
+	BUG_ON(counter == NULL);
+	if (trignum != 0)
+		return -EINVAL;
+
+	rtdm_lock_get_irqsave(&counter->lock, flags);
+	if (counter->mite_chan)
+		a4l_mite_dma_arm(counter->mite_chan);
+	else
+		retval = -EIO;
+	rtdm_lock_put_irqrestore(&counter->lock, flags);
+	if (retval < 0)
+		return retval;
+	retval = ni_tio_arm(counter, 1, NI_GPCT_ARM_IMMEDIATE);
+
+	/* TODO: disable trigger until a command is recorded.
+	   A null trig at the beginning prevents the AO start trigger from
+	   executing more than once per command (and doing things like
+	   trying to allocate the AO DMA channel multiple times). */
+
+	return retval;
+}
+
+static int ni_tio_input_cmd(struct ni_gpct *counter, struct a4l_cmd_desc *cmd)
+{
+	struct ni_gpct_device *counter_dev = counter->counter_dev;
+	int retval = 0;
+
+	counter->mite_chan->dir = A4L_INPUT;
+	switch (counter_dev->variant) {
+	case ni_gpct_variant_m_series:
+	case ni_gpct_variant_660x:
+		a4l_mite_prep_dma(counter->mite_chan, 32, 32);
+		break;
+	case ni_gpct_variant_e_series:
+		a4l_mite_prep_dma(counter->mite_chan, 16, 32);
+		break;
+	default:
+		BUG();
+		break;
+	}
+	ni_tio_set_bits(counter, NITIO_Gi_Command_Reg(counter->counter_index),
+		Gi_Save_Trace_Bit, 0);
+	ni_tio_configure_dma(counter, 1, 1);
+	switch (cmd->start_src) {
+	case TRIG_NOW:
+		a4l_mite_dma_arm(counter->mite_chan);
+		retval = ni_tio_arm(counter, 1, NI_GPCT_ARM_IMMEDIATE);
+		break;
+	case TRIG_INT:
+		break;
+	case TRIG_EXT:
+		a4l_mite_dma_arm(counter->mite_chan);
+		retval = ni_tio_arm(counter, 1, cmd->start_arg);
+		break;
+	case TRIG_OTHER:
+		a4l_mite_dma_arm(counter->mite_chan);
+		break;
+	default:
+		BUG();
+		break;
+	}
+	return retval;
+}
+
+static int ni_tio_output_cmd(struct ni_gpct *counter, struct a4l_cmd_desc *cmd)
+{
+	__a4l_err("ni_tio: output commands not yet implemented.\n");
+	return -ENOTSUPP;
+}
+
+static int ni_tio_cmd_setup(struct ni_gpct *counter, struct a4l_cmd_desc *cmd)
+{
+	int retval = 0, set_gate_source = 0;
+	unsigned int gate_source;
+
+	if (cmd->scan_begin_src == TRIG_EXT) {
+		set_gate_source = 1;
+		gate_source = cmd->scan_begin_arg;
+	} else if (cmd->convert_src == TRIG_EXT) {
+		set_gate_source = 1;
+		gate_source = cmd->convert_arg;
+	}
+	if (set_gate_source) {
+		retval = ni_tio_set_gate_src(counter, 0, gate_source);
+	}
+	if (cmd->flags & TRIG_WAKE_EOS) {
+		ni_tio_set_bits(counter,
+			NITIO_Gi_Interrupt_Enable_Reg(counter->counter_index),
+			Gi_Gate_Interrupt_Enable_Bit(counter->counter_index),
+			Gi_Gate_Interrupt_Enable_Bit(counter->counter_index));
+	}
+	return retval;
+}
+
+int a4l_ni_tio_cmd(struct ni_gpct *counter, struct a4l_cmd_desc *cmd)
+{
+	int retval = 0;
+	unsigned long flags;
+
+	rtdm_lock_get_irqsave(&counter->lock, flags);
+	if (counter->mite_chan == NULL) {
+		__a4l_err("a4l_ni_tio_cmd: commands only supported with DMA."
+			     " Interrupt-driven commands not yet implemented.\n");
+		retval = -EIO;
+	} else {
+		retval = ni_tio_cmd_setup(counter, cmd);
+		if (retval == 0) {
+			if (cmd->flags & A4L_CMD_WRITE) {
+				retval = ni_tio_output_cmd(counter, cmd);
+			} else {
+				retval = ni_tio_input_cmd(counter, cmd);
+			}
+		}
+	}
+	rtdm_lock_put_irqrestore(&counter->lock, flags);
+	return retval;
+}
+
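+/* Trigger sources accepted by the counter command support; exported below
+   so that the NI board drivers can reference it as the subdevice command
+   mask. */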
+struct a4l_cmd_desc a4l_ni_tio_cmd_mask = {
+	.idx_subd = 0,
+	.start_src = TRIG_NOW | TRIG_INT | TRIG_OTHER | TRIG_EXT,
+	.scan_begin_src = TRIG_FOLLOW | TRIG_EXT | TRIG_OTHER,
+	.convert_src = TRIG_NOW | TRIG_EXT | TRIG_OTHER,
+	.scan_end_src = TRIG_COUNT,
+	.stop_src = TRIG_NONE,
+};
+
+int a4l_ni_tio_cmdtest(struct ni_gpct *counter, struct a4l_cmd_desc *cmd)
+{
+	/* Make sure trigger sources are trivially valid */
+
+	if ((cmd->start_src & TRIG_EXT) != 0 &&
+	    ni_tio_counting_mode_registers_present(counter->counter_dev) == 0)
+		return -EINVAL;
+
+	/* Make sure trigger sources are mutually compatible */
+
+	if (cmd->convert_src != TRIG_NOW && cmd->scan_begin_src != TRIG_FOLLOW)
+		return -EINVAL;
+
+	/* Make sure arguments are trivially compatible */
+
+	if (cmd->start_src != TRIG_EXT) {
+		if (cmd->start_arg != 0) {
+			return -EINVAL;
+		}
+	}
+
+	if (cmd->scan_begin_src != TRIG_EXT) {
+		if (cmd->scan_begin_arg) {
+			return -EINVAL;
+		}
+	}
+
+	if (cmd->convert_src != TRIG_EXT) {
+		if (cmd->convert_arg) {
+			return -EINVAL;
+		}
+	}
+
+	if (cmd->scan_end_arg != cmd->nb_chan) {
+		return -EINVAL;
+	}
+
+	if (cmd->stop_src == TRIG_NONE) {
+		if (cmd->stop_arg != 0) {
+			return -EINVAL;
+		}
+	}
+
+	return 0;
+}
+
+int a4l_ni_tio_cancel(struct ni_gpct *counter)
+{
+	unsigned long flags;
+
+	ni_tio_arm(counter, 0, 0);
+	rtdm_lock_get_irqsave(&counter->lock, flags);
+	if (counter->mite_chan) {
+		a4l_mite_dma_disarm(counter->mite_chan);
+	}
+	rtdm_lock_put_irqrestore(&counter->lock, flags);
+	ni_tio_configure_dma(counter, 0, 0);
+
+	ni_tio_set_bits(counter,
+		NITIO_Gi_Interrupt_Enable_Reg(counter->counter_index),
+		Gi_Gate_Interrupt_Enable_Bit(counter->counter_index), 0x0);
+	return 0;
+}
+
+/* During buffered input counter operation on e-series boards, the gate
+   interrupt is acknowledged automatically by the DMA controller, due to
+   the Gi_Read/Write_Acknowledges_IRQ bits in the input select
+   register. */
+static int should_ack_gate(struct ni_gpct *counter)
+{
+	unsigned long flags;
+	int retval = 0;
+
+	switch (counter->counter_dev->variant) {
+	case ni_gpct_variant_m_series:
+	case ni_gpct_variant_660x:
+		/* Not sure if the 660x really supports gate interrupts
+		   (the bits are not listed in the register-level manual). */
+		return 1;
+		break;
+	case ni_gpct_variant_e_series:
+		rtdm_lock_get_irqsave(&counter->lock, flags);
+		{
+			if (counter->mite_chan == NULL ||
+				counter->mite_chan->dir != A4L_INPUT ||
+				(a4l_mite_done(counter->mite_chan))) {
+				retval = 1;
+			}
+		}
+		rtdm_lock_put_irqrestore(&counter->lock, flags);
+		break;
+	}
+	return retval;
+}
+
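+/* Acknowledge any pending counter interrupt/error conditions; the optional
+   output pointers, when non-NULL, are cleared and then set when the
+   corresponding condition is reported. */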
+void a4l_ni_tio_acknowledge_and_confirm(struct ni_gpct *counter,
+				    int *gate_error,
+				    int *tc_error,
+				    int *perm_stale_data, int *stale_data)
+{
+	const unsigned short gxx_status = read_register(counter,
+		NITIO_Gxx_Status_Reg(counter->counter_index));
+	const unsigned short gi_status = read_register(counter,
+		NITIO_Gi_Status_Reg(counter->counter_index));
+	unsigned ack = 0;
+
+	if (gate_error)
+		*gate_error = 0;
+	if (tc_error)
+		*tc_error = 0;
+	if (perm_stale_data)
+		*perm_stale_data = 0;
+	if (stale_data)
+		*stale_data = 0;
+
+	if (gxx_status & Gi_Gate_Error_Bit(counter->counter_index)) {
+		ack |= Gi_Gate_Error_Confirm_Bit(counter->counter_index);
+		if (gate_error) {
+			/* The 660x doesn't support automatic
+			   acknowledgement of the gate interrupt via DMA
+			   read/write, and reports bogus gate errors. */
+			if (counter->counter_dev->variant !=
+				ni_gpct_variant_660x) {
+				*gate_error = 1;
+			}
+		}
+	}
+	if (gxx_status & Gi_TC_Error_Bit(counter->counter_index)) {
+		ack |= Gi_TC_Error_Confirm_Bit(counter->counter_index);
+		if (tc_error)
+			*tc_error = 1;
+	}
+	if (gi_status & Gi_TC_Bit) {
+		ack |= Gi_TC_Interrupt_Ack_Bit;
+	}
+	if (gi_status & Gi_Gate_Interrupt_Bit) {
+		if (should_ack_gate(counter))
+			ack |= Gi_Gate_Interrupt_Ack_Bit;
+	}
+	if (ack)
+		write_register(counter, ack,
+			NITIO_Gi_Interrupt_Acknowledge_Reg(counter->
+				counter_index));
+	if (ni_tio_get_soft_copy(counter,
+			NITIO_Gi_Mode_Reg(counter->
+				counter_index)) & Gi_Loading_On_Gate_Bit) {
+		if (gxx_status & Gi_Stale_Data_Bit(counter->counter_index)) {
+			if (stale_data)
+				*stale_data = 1;
+		}
+		if (read_register(counter,
+				NITIO_Gxx_Joint_Status2_Reg(counter->
+					counter_index)) &
+			Gi_Permanent_Stale_Bit(counter->counter_index)) {
+			__a4l_err("%s: Gi_Permanent_Stale_Data detected.\n",
+				    __FUNCTION__);
+			if (perm_stale_data)
+				*perm_stale_data = 1;
+		}
+	}
+}
+
+/* TODO: to be adapted after a4l_buf_evt review */
+void a4l_ni_tio_handle_interrupt(struct ni_gpct *counter, struct a4l_device *dev)
+{
+	unsigned gpct_mite_status;
+	unsigned long flags;
+	int gate_error;
+	int tc_error;
+	int perm_stale_data;
+	struct a4l_subdevice *subd =
+		a4l_get_subd(dev, NI_GPCT_SUBDEV(counter->counter_index));
+
+	a4l_ni_tio_acknowledge_and_confirm(counter, &gate_error, &tc_error,
+		&perm_stale_data, NULL);
+	if (gate_error) {
+		__a4l_err("%s: Gi_Gate_Error detected.\n", __FUNCTION__);
+		a4l_buf_evt(subd, A4L_BUF_ERROR);
+	}
+	if (perm_stale_data) {
+		a4l_buf_evt(subd, A4L_BUF_ERROR);
+	}
+	switch (counter->counter_dev->variant) {
+	case ni_gpct_variant_m_series:
+	case ni_gpct_variant_660x:
+		if (read_register(counter,
+				  NITIO_Gi_DMA_Status_Reg(counter->counter_index))
+		    & Gi_DRQ_Error_Bit) {
+			__a4l_err("%s: Gi_DRQ_Error detected.\n", __FUNCTION__);
+			a4l_buf_evt(subd, A4L_BUF_ERROR);
+		}
+		break;
+	case ni_gpct_variant_e_series:
+		break;
+	}
+	rtdm_lock_get_irqsave(&counter->lock, flags);
+	if (counter->mite_chan == NULL) {
+		rtdm_lock_put_irqrestore(&counter->lock, flags);
+		return;
+	}
+	gpct_mite_status = a4l_mite_get_status(counter->mite_chan);
+	if (gpct_mite_status & CHSR_LINKC) {
+		writel(CHOR_CLRLC,
+			counter->mite_chan->mite->mite_io_addr +
+			MITE_CHOR(counter->mite_chan->channel));
+	}
+	a4l_mite_sync_input_dma(counter->mite_chan, subd);
+	rtdm_lock_put_irqrestore(&counter->lock, flags);
+}
+
+void a4l_ni_tio_set_mite_channel(struct ni_gpct *counter,
+			     struct mite_channel *mite_chan)
+{
+	unsigned long flags;
+
+	rtdm_lock_get_irqsave(&counter->lock, flags);
+	counter->mite_chan = mite_chan;
+	rtdm_lock_put_irqrestore(&counter->lock, flags);
+}
+
+#endif /* CONFIG_XENO_DRIVERS_ANALOGY_NI_MITE */
+
+static int __init ni_tio_init_module(void)
+{
+	return 0;
+}
+
+static void __exit ni_tio_cleanup_module(void)
+{
+}
+
+MODULE_DESCRIPTION("Analogy support for NI general-purpose counters");
+MODULE_LICENSE("GPL");
+
+module_init(ni_tio_init_module);
+module_exit(ni_tio_cleanup_module);
+
+EXPORT_SYMBOL_GPL(a4l_ni_tio_rinsn);
+EXPORT_SYMBOL_GPL(a4l_ni_tio_winsn);
+EXPORT_SYMBOL_GPL(a4l_ni_tio_insn_config);
+EXPORT_SYMBOL_GPL(a4l_ni_tio_init_counter);
+EXPORT_SYMBOL_GPL(a4l_ni_gpct_device_construct);
+EXPORT_SYMBOL_GPL(a4l_ni_gpct_device_destroy);
+
+#if (defined(CONFIG_XENO_DRIVERS_ANALOGY_NI_MITE) || \
+     defined(CONFIG_XENO_DRIVERS_ANALOGY_NI_MITE_MODULE))
+
+EXPORT_SYMBOL_GPL(a4l_ni_tio_input_inttrig);
+EXPORT_SYMBOL_GPL(a4l_ni_tio_cmd);
+EXPORT_SYMBOL_GPL(a4l_ni_tio_cmd_mask);
+EXPORT_SYMBOL_GPL(a4l_ni_tio_cmdtest);
+EXPORT_SYMBOL_GPL(a4l_ni_tio_cancel);
+EXPORT_SYMBOL_GPL(a4l_ni_tio_handle_interrupt);
+EXPORT_SYMBOL_GPL(a4l_ni_tio_set_mite_channel);
+EXPORT_SYMBOL_GPL(a4l_ni_tio_acknowledge_and_confirm);
+
+#endif /* CONFIG_XENO_DRIVERS_ANALOGY_NI_MITE */
+++ linux-patched/drivers/xenomai/analogy/national_instruments/ni_tio.h	2022-03-21 12:58:31.078872491 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/drivers/xenomai/analogy/Makefile	1970-01-01 01:00:00.000000000 +0100
+/*
+ * Hardware driver for NI general purpose counter
+ * Copyright (C) 2006 Frank Mori Hess <fmhess@users.sourceforge.net>
+ *
+ * This code is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published
+ * by the Free Software Foundation; either version 2 of the License,
+ * or (at your option) any later version.
+ *
+ * This code is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Xenomai; if not, write to the Free Software Foundation,
+ * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+#ifndef __ANALOGY_NI_TIO_H__
+#define __ANALOGY_NI_TIO_H__
+
+#include <rtdm/analogy/device.h>
+
+#ifdef CONFIG_PCI
+#include "mite.h"
+#endif
+
+enum ni_gpct_register {
+	NITIO_G0_Autoincrement_Reg,
+	NITIO_G1_Autoincrement_Reg,
+	NITIO_G2_Autoincrement_Reg,
+	NITIO_G3_Autoincrement_Reg,
+	NITIO_G0_Command_Reg,
+	NITIO_G1_Command_Reg,
+	NITIO_G2_Command_Reg,
+	NITIO_G3_Command_Reg,
+	NITIO_G0_HW_Save_Reg,
+	NITIO_G1_HW_Save_Reg,
+	NITIO_G2_HW_Save_Reg,
+	NITIO_G3_HW_Save_Reg,
+	NITIO_G0_SW_Save_Reg,
+	NITIO_G1_SW_Save_Reg,
+	NITIO_G2_SW_Save_Reg,
+	NITIO_G3_SW_Save_Reg,
+	NITIO_G0_Mode_Reg,
+	NITIO_G1_Mode_Reg,
+	NITIO_G2_Mode_Reg,
+	NITIO_G3_Mode_Reg,
+	NITIO_G0_LoadA_Reg,
+	NITIO_G1_LoadA_Reg,
+	NITIO_G2_LoadA_Reg,
+	NITIO_G3_LoadA_Reg,
+	NITIO_G0_LoadB_Reg,
+	NITIO_G1_LoadB_Reg,
+	NITIO_G2_LoadB_Reg,
+	NITIO_G3_LoadB_Reg,
+	NITIO_G0_Input_Select_Reg,
+	NITIO_G1_Input_Select_Reg,
+	NITIO_G2_Input_Select_Reg,
+	NITIO_G3_Input_Select_Reg,
+	NITIO_G0_Counting_Mode_Reg,
+	NITIO_G1_Counting_Mode_Reg,
+	NITIO_G2_Counting_Mode_Reg,
+	NITIO_G3_Counting_Mode_Reg,
+	NITIO_G0_Second_Gate_Reg,
+	NITIO_G1_Second_Gate_Reg,
+	NITIO_G2_Second_Gate_Reg,
+	NITIO_G3_Second_Gate_Reg,
+	NITIO_G01_Status_Reg,
+	NITIO_G23_Status_Reg,
+	NITIO_G01_Joint_Reset_Reg,
+	NITIO_G23_Joint_Reset_Reg,
+	NITIO_G01_Joint_Status1_Reg,
+	NITIO_G23_Joint_Status1_Reg,
+	NITIO_G01_Joint_Status2_Reg,
+	NITIO_G23_Joint_Status2_Reg,
+	NITIO_G0_DMA_Config_Reg,
+	NITIO_G1_DMA_Config_Reg,
+	NITIO_G2_DMA_Config_Reg,
+	NITIO_G3_DMA_Config_Reg,
+	NITIO_G0_DMA_Status_Reg,
+	NITIO_G1_DMA_Status_Reg,
+	NITIO_G2_DMA_Status_Reg,
+	NITIO_G3_DMA_Status_Reg,
+	NITIO_G0_ABZ_Reg,
+	NITIO_G1_ABZ_Reg,
+	NITIO_G0_Interrupt_Acknowledge_Reg,
+	NITIO_G1_Interrupt_Acknowledge_Reg,
+	NITIO_G2_Interrupt_Acknowledge_Reg,
+	NITIO_G3_Interrupt_Acknowledge_Reg,
+	NITIO_G0_Status_Reg,
+	NITIO_G1_Status_Reg,
+	NITIO_G2_Status_Reg,
+	NITIO_G3_Status_Reg,
+	NITIO_G0_Interrupt_Enable_Reg,
+	NITIO_G1_Interrupt_Enable_Reg,
+	NITIO_G2_Interrupt_Enable_Reg,
+	NITIO_G3_Interrupt_Enable_Reg,
+	NITIO_Num_Registers,
+};
+
+static inline enum ni_gpct_register NITIO_Gi_Autoincrement_Reg(unsigned
+	counter_index)
+{
+	switch (counter_index) {
+	case 0:
+		return NITIO_G0_Autoincrement_Reg;
+		break;
+	case 1:
+		return NITIO_G1_Autoincrement_Reg;
+		break;
+	case 2:
+		return NITIO_G2_Autoincrement_Reg;
+		break;
+	case 3:
+		return NITIO_G3_Autoincrement_Reg;
+		break;
+	default:
+		BUG();
+		break;
+	}
+	return 0;
+}
+
+static inline enum ni_gpct_register NITIO_Gi_Command_Reg(unsigned counter_index)
+{
+	switch (counter_index) {
+	case 0:
+		return NITIO_G0_Command_Reg;
+		break;
+	case 1:
+		return NITIO_G1_Command_Reg;
+		break;
+	case 2:
+		return NITIO_G2_Command_Reg;
+		break;
+	case 3:
+		return NITIO_G3_Command_Reg;
+		break;
+	default:
+		BUG();
+		break;
+	}
+	return 0;
+}
+
+static inline enum ni_gpct_register NITIO_Gi_Counting_Mode_Reg(unsigned
+	counter_index)
+{
+	switch (counter_index) {
+	case 0:
+		return NITIO_G0_Counting_Mode_Reg;
+		break;
+	case 1:
+		return NITIO_G1_Counting_Mode_Reg;
+		break;
+	case 2:
+		return NITIO_G2_Counting_Mode_Reg;
+		break;
+	case 3:
+		return NITIO_G3_Counting_Mode_Reg;
+		break;
+	default:
+		BUG();
+		break;
+	}
+	return 0;
+}
+
+static inline enum ni_gpct_register NITIO_Gi_Input_Select_Reg(unsigned
+	counter_index)
+{
+	switch (counter_index) {
+	case 0:
+		return NITIO_G0_Input_Select_Reg;
+		break;
+	case 1:
+		return NITIO_G1_Input_Select_Reg;
+		break;
+	case 2:
+		return NITIO_G2_Input_Select_Reg;
+		break;
+	case 3:
+		return NITIO_G3_Input_Select_Reg;
+		break;
+	default:
+		BUG();
+		break;
+	}
+	return 0;
+}
+
+static inline enum ni_gpct_register NITIO_Gxx_Joint_Reset_Reg(unsigned
+	counter_index)
+{
+	switch (counter_index) {
+	case 0:
+	case 1:
+		return NITIO_G01_Joint_Reset_Reg;
+		break;
+	case 2:
+	case 3:
+		return NITIO_G23_Joint_Reset_Reg;
+		break;
+	default:
+		BUG();
+		break;
+	}
+	return 0;
+}
+
+static inline enum ni_gpct_register NITIO_Gxx_Joint_Status1_Reg(unsigned
+	counter_index)
+{
+	switch (counter_index) {
+	case 0:
+	case 1:
+		return NITIO_G01_Joint_Status1_Reg;
+		break;
+	case 2:
+	case 3:
+		return NITIO_G23_Joint_Status1_Reg;
+		break;
+	default:
+		BUG();
+		break;
+	}
+	return 0;
+}
+
+static inline enum ni_gpct_register NITIO_Gxx_Joint_Status2_Reg(unsigned
+	counter_index)
+{
+	switch (counter_index) {
+	case 0:
+	case 1:
+		return NITIO_G01_Joint_Status2_Reg;
+		break;
+	case 2:
+	case 3:
+		return NITIO_G23_Joint_Status2_Reg;
+		break;
+	default:
+		BUG();
+		break;
+	}
+	return 0;
+}
+
+static inline enum ni_gpct_register NITIO_Gxx_Status_Reg(unsigned counter_index)
+{
+	switch (counter_index) {
+	case 0:
+	case 1:
+		return NITIO_G01_Status_Reg;
+		break;
+	case 2:
+	case 3:
+		return NITIO_G23_Status_Reg;
+		break;
+	default:
+		BUG();
+		break;
+	}
+	return 0;
+}
+
+static inline enum ni_gpct_register NITIO_Gi_LoadA_Reg(unsigned counter_index)
+{
+	switch (counter_index) {
+	case 0:
+		return NITIO_G0_LoadA_Reg;
+		break;
+	case 1:
+		return NITIO_G1_LoadA_Reg;
+		break;
+	case 2:
+		return NITIO_G2_LoadA_Reg;
+		break;
+	case 3:
+		return NITIO_G3_LoadA_Reg;
+		break;
+	default:
+		BUG();
+		break;
+	}
+	return 0;
+}
+
+static inline enum ni_gpct_register NITIO_Gi_LoadB_Reg(unsigned counter_index)
+{
+	switch (counter_index) {
+	case 0:
+		return NITIO_G0_LoadB_Reg;
+		break;
+	case 1:
+		return NITIO_G1_LoadB_Reg;
+		break;
+	case 2:
+		return NITIO_G2_LoadB_Reg;
+		break;
+	case 3:
+		return NITIO_G3_LoadB_Reg;
+		break;
+	default:
+		BUG();
+		break;
+	}
+	return 0;
+}
+
+static inline enum ni_gpct_register NITIO_Gi_Mode_Reg(unsigned counter_index)
+{
+	switch (counter_index) {
+	case 0:
+		return NITIO_G0_Mode_Reg;
+		break;
+	case 1:
+		return NITIO_G1_Mode_Reg;
+		break;
+	case 2:
+		return NITIO_G2_Mode_Reg;
+		break;
+	case 3:
+		return NITIO_G3_Mode_Reg;
+		break;
+	default:
+		BUG();
+		break;
+	}
+	return 0;
+}
+
+static inline enum ni_gpct_register NITIO_Gi_SW_Save_Reg(int counter_index)
+{
+	switch (counter_index) {
+	case 0:
+		return NITIO_G0_SW_Save_Reg;
+		break;
+	case 1:
+		return NITIO_G1_SW_Save_Reg;
+		break;
+	case 2:
+		return NITIO_G2_SW_Save_Reg;
+		break;
+	case 3:
+		return NITIO_G3_SW_Save_Reg;
+		break;
+	default:
+		BUG();
+		break;
+	}
+	return 0;
+}
+
+static inline enum ni_gpct_register NITIO_Gi_Second_Gate_Reg(int counter_index)
+{
+	switch (counter_index) {
+	case 0:
+		return NITIO_G0_Second_Gate_Reg;
+		break;
+	case 1:
+		return NITIO_G1_Second_Gate_Reg;
+		break;
+	case 2:
+		return NITIO_G2_Second_Gate_Reg;
+		break;
+	case 3:
+		return NITIO_G3_Second_Gate_Reg;
+		break;
+	default:
+		BUG();
+		break;
+	}
+	return 0;
+}
+
+static inline enum ni_gpct_register NITIO_Gi_DMA_Config_Reg(int counter_index)
+{
+	switch (counter_index) {
+	case 0:
+		return NITIO_G0_DMA_Config_Reg;
+		break;
+	case 1:
+		return NITIO_G1_DMA_Config_Reg;
+		break;
+	case 2:
+		return NITIO_G2_DMA_Config_Reg;
+		break;
+	case 3:
+		return NITIO_G3_DMA_Config_Reg;
+		break;
+	default:
+		BUG();
+		break;
+	}
+	return 0;
+}
+
+static inline enum ni_gpct_register NITIO_Gi_DMA_Status_Reg(int counter_index)
+{
+	switch (counter_index) {
+	case 0:
+		return NITIO_G0_DMA_Status_Reg;
+		break;
+	case 1:
+		return NITIO_G1_DMA_Status_Reg;
+		break;
+	case 2:
+		return NITIO_G2_DMA_Status_Reg;
+		break;
+	case 3:
+		return NITIO_G3_DMA_Status_Reg;
+		break;
+	default:
+		BUG();
+		break;
+	}
+	return 0;
+}
+
+static inline enum ni_gpct_register NITIO_Gi_ABZ_Reg(int counter_index)
+{
+	switch (counter_index) {
+	case 0:
+		return NITIO_G0_ABZ_Reg;
+		break;
+	case 1:
+		return NITIO_G1_ABZ_Reg;
+		break;
+	default:
+		BUG();
+		break;
+	}
+	return 0;
+}
+
+static inline enum ni_gpct_register NITIO_Gi_Interrupt_Acknowledge_Reg(int
+	counter_index)
+{
+	switch (counter_index) {
+	case 0:
+		return NITIO_G0_Interrupt_Acknowledge_Reg;
+		break;
+	case 1:
+		return NITIO_G1_Interrupt_Acknowledge_Reg;
+		break;
+	case 2:
+		return NITIO_G2_Interrupt_Acknowledge_Reg;
+		break;
+	case 3:
+		return NITIO_G3_Interrupt_Acknowledge_Reg;
+		break;
+	default:
+		BUG();
+		break;
+	}
+	return 0;
+}
+
+static inline enum ni_gpct_register NITIO_Gi_Status_Reg(int counter_index)
+{
+	switch (counter_index) {
+	case 0:
+		return NITIO_G0_Status_Reg;
+		break;
+	case 1:
+		return NITIO_G1_Status_Reg;
+		break;
+	case 2:
+		return NITIO_G2_Status_Reg;
+		break;
+	case 3:
+		return NITIO_G3_Status_Reg;
+		break;
+	default:
+		BUG();
+		break;
+	}
+	return 0;
+}
+
+static inline enum ni_gpct_register
+NITIO_Gi_Interrupt_Enable_Reg(int counter_index)
+{
+	switch (counter_index) {
+	case 0:
+		return NITIO_G0_Interrupt_Enable_Reg;
+		break;
+	case 1:
+		return NITIO_G1_Interrupt_Enable_Reg;
+		break;
+	case 2:
+		return NITIO_G2_Interrupt_Enable_Reg;
+		break;
+	case 3:
+		return NITIO_G3_Interrupt_Enable_Reg;
+		break;
+	default:
+		BUG();
+		break;
+	}
+	return 0;
+}
+
+enum ni_gpct_variant {
+	ni_gpct_variant_e_series,
+	ni_gpct_variant_m_series,
+	ni_gpct_variant_660x
+};
+
+struct ni_gpct {
+	struct ni_gpct_device *counter_dev;
+	unsigned counter_index;
+	unsigned chip_index;
+	uint64_t clock_period_ps; /* clock period in picoseconds */
+	struct mite_channel *mite_chan;
+	rtdm_lock_t lock;
+};
+
+struct ni_gpct_device {
+	struct a4l_device *dev;
+	void (*write_register)(struct ni_gpct * counter,
+				unsigned int bits, enum ni_gpct_register reg);
+	unsigned (*read_register)(struct ni_gpct * counter,
+				   enum ni_gpct_register reg);
+	enum ni_gpct_variant variant;
+	struct ni_gpct **counters;
+	unsigned num_counters;
+	unsigned regs[NITIO_Num_Registers];
+	rtdm_lock_t regs_lock;
+};
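+
+/*
+ * Usage sketch (illustrative only, not taken from the original code): a
+ * board driver typically reaches a counter register through the
+ * ni_gpct_device callbacks combined with the NITIO_*_Reg() lookup helpers
+ * above, e.g. (mode_bits being a placeholder value)
+ *
+ *	counter->counter_dev->write_register(counter, mode_bits,
+ *			NITIO_Gi_Mode_Reg(counter->counter_index));
+ */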
+
+#define Gi_Auto_Increment_Mask		0xff
+#define Gi_Up_Down_Shift		5
+
+#define Gi_Arm_Bit			0x1
+#define Gi_Save_Trace_Bit		0x2
+#define Gi_Load_Bit			0x4
+#define Gi_Disarm_Bit			0x10
+#define Gi_Up_Down_Mask			(0x3 << Gi_Up_Down_Shift)
+#define Gi_Always_Down_Bits		(0x0 << Gi_Up_Down_Shift)
+#define Gi_Always_Up_Bits		(0x1 << Gi_Up_Down_Shift)
+#define Gi_Up_Down_Hardware_IO_Bits	(0x2 << Gi_Up_Down_Shift)
+#define Gi_Up_Down_Hardware_Gate_Bits	(0x3 << Gi_Up_Down_Shift)
+#define Gi_Write_Switch_Bit		0x80
+#define Gi_Synchronize_Gate_Bit		0x100
+#define Gi_Little_Big_Endian_Bit	0x200
+#define Gi_Bank_Switch_Start_Bit	0x400
+#define Gi_Bank_Switch_Mode_Bit		0x800
+#define Gi_Bank_Switch_Enable_Bit	0x1000
+#define Gi_Arm_Copy_Bit			0x2000
+#define Gi_Save_Trace_Copy_Bit		0x4000
+#define Gi_Disarm_Copy_Bit		0x8000
+
+#define Gi_Index_Phase_Bitshift	5
+#define Gi_HW_Arm_Select_Shift		8
+
+#define Gi_Counting_Mode_Mask		0x7
+#define Gi_Counting_Mode_Normal_Bits	0x0
+#define Gi_Counting_Mode_QuadratureX1_Bits 0x1
+#define Gi_Counting_Mode_QuadratureX2_Bits 0x2
+#define Gi_Counting_Mode_QuadratureX4_Bits 0x3
+#define Gi_Counting_Mode_Two_Pulse_Bits	0x4
+#define Gi_Counting_Mode_Sync_Source_Bits 0x6
+#define Gi_Index_Mode_Bit		0x10
+#define Gi_Index_Phase_Mask		(0x3 << Gi_Index_Phase_Bitshift)
+#define Gi_Index_Phase_LowA_LowB	(0x0 << Gi_Index_Phase_Bitshift)
+#define Gi_Index_Phase_LowA_HighB	(0x1 << Gi_Index_Phase_Bitshift)
+#define Gi_Index_Phase_HighA_LowB	(0x2 << Gi_Index_Phase_Bitshift)
+#define Gi_Index_Phase_HighA_HighB	(0x3 << Gi_Index_Phase_Bitshift)
+
+/* From m-series example code,
+   not documented in 660x register level manual */
+#define Gi_HW_Arm_Enable_Bit		0x80
+/* From m-series example code,
+   not documented in 660x register level manual */
+#define Gi_660x_HW_Arm_Select_Mask	(0x7 << Gi_HW_Arm_Select_Shift)
+#define Gi_660x_Prescale_X8_Bit		0x1000
+#define Gi_M_Series_Prescale_X8_Bit	0x2000
+#define Gi_M_Series_HW_Arm_Select_Mask	(0x1f << Gi_HW_Arm_Select_Shift)
+/* Must be set for clocks over 40MHz,
+   which includes synchronous counting and quadrature modes */
+#define Gi_660x_Alternate_Sync_Bit	0x2000
+#define Gi_M_Series_Alternate_Sync_Bit	0x4000
+/* From m-series example code,
+   not documented in 660x register level manual */
+#define Gi_660x_Prescale_X2_Bit		0x4000
+#define Gi_M_Series_Prescale_X2_Bit	0x8000
+
+static inline unsigned int Gi_Alternate_Sync_Bit(enum ni_gpct_variant variant)
+{
+	switch (variant) {
+	case ni_gpct_variant_e_series:
+		return 0;
+		break;
+	case ni_gpct_variant_m_series:
+		return Gi_M_Series_Alternate_Sync_Bit;
+		break;
+	case ni_gpct_variant_660x:
+		return Gi_660x_Alternate_Sync_Bit;
+		break;
+	default:
+		BUG();
+		break;
+	}
+	return 0;
+}
+
+static inline unsigned int Gi_Prescale_X2_Bit(enum ni_gpct_variant variant)
+{
+	switch (variant) {
+	case ni_gpct_variant_e_series:
+		return 0;
+		break;
+	case ni_gpct_variant_m_series:
+		return Gi_M_Series_Prescale_X2_Bit;
+		break;
+	case ni_gpct_variant_660x:
+		return Gi_660x_Prescale_X2_Bit;
+		break;
+	default:
+		BUG();
+		break;
+	}
+	return 0;
+}
+
+static inline unsigned int Gi_Prescale_X8_Bit(enum ni_gpct_variant variant)
+{
+	switch (variant) {
+	case ni_gpct_variant_e_series:
+		return 0;
+		break;
+	case ni_gpct_variant_m_series:
+		return Gi_M_Series_Prescale_X8_Bit;
+		break;
+	case ni_gpct_variant_660x:
+		return Gi_660x_Prescale_X8_Bit;
+		break;
+	default:
+		BUG();
+		break;
+	}
+	return 0;
+}
+
+static inline unsigned int Gi_HW_Arm_Select_Mask(enum ni_gpct_variant variant)
+{
+	switch (variant) {
+	case ni_gpct_variant_e_series:
+		return 0;
+		break;
+	case ni_gpct_variant_m_series:
+		return Gi_M_Series_HW_Arm_Select_Mask;
+		break;
+	case ni_gpct_variant_660x:
+		return Gi_660x_HW_Arm_Select_Mask;
+		break;
+	default:
+		BUG();
+		break;
+	}
+	return 0;
+}
+
+#define NI_660x_Timebase_1_Clock	0x0 /* 20MHz */
+#define NI_660x_Source_Pin_i_Clock	0x1
+#define NI_660x_Next_Gate_Clock		0xa
+#define NI_660x_Timebase_2_Clock	0x12 /* 100KHz */
+#define NI_660x_Next_TC_Clock		0x13
+#define NI_660x_Timebase_3_Clock	0x1e /* 80MHz */
+#define NI_660x_Logic_Low_Clock		0x1f
+
+#define ni_660x_max_rtsi_channel	6
+#define ni_660x_max_source_pin		7
+
+static inline unsigned int NI_660x_RTSI_Clock(unsigned int n)
+{
+	BUG_ON(n > ni_660x_max_rtsi_channel);
+	return (0xb + n);
+}
+
+static inline unsigned int NI_660x_Source_Pin_Clock(unsigned int n)
+{
+	BUG_ON(n > ni_660x_max_source_pin);
+	return (0x2 + n);
+}
+
+/* Clock sources for ni e and m series boards,
+   get bits with Gi_Source_Select_Bits() */
+#define NI_M_Series_Timebase_1_Clock	0x0 /* 20MHz */
+#define NI_M_Series_Timebase_2_Clock	0x12 /* 100KHz */
+#define NI_M_Series_Next_TC_Clock	0x13
+#define NI_M_Series_Next_Gate_Clock	0x14 /* when Gi_Src_SubSelect = 0 */
+#define NI_M_Series_PXI_Star_Trigger_Clock 0x14 /* when Gi_Src_SubSelect = 1 */
+#define NI_M_Series_PXI10_Clock		0x1d
+#define NI_M_Series_Timebase_3_Clock	0x1e /* 80MHz, when Gi_Src_SubSelect = 0 */
+#define NI_M_Series_Analog_Trigger_Out_Clock 0x1e /* when Gi_Src_SubSelect = 1 */
+#define NI_M_Series_Logic_Low_Clock	0x1f
+
+#define ni_m_series_max_pfi_channel	15
+#define ni_m_series_max_rtsi_channel	7
+
+static inline unsigned int NI_M_Series_PFI_Clock(unsigned int n)
+{
+	BUG_ON(n > ni_m_series_max_pfi_channel);
+	if (n < 10)
+		return 1 + n;
+	else
+		return 0xb + n;
+}
+
+static inline unsigned int NI_M_Series_RTSI_Clock(unsigned int n)
+{
+	BUG_ON(n > ni_m_series_max_rtsi_channel);
+	if (n == 7)
+		return 0x1b;
+	else
+		return 0xb + n;
+}
+
+#define NI_660x_Source_Pin_i_Gate_Select 0x0
+#define NI_660x_Gate_Pin_i_Gate_Select	0x1
+#define NI_660x_Next_SRC_Gate_Select	0xa
+#define NI_660x_Next_Out_Gate_Select	0x14
+#define NI_660x_Logic_Low_Gate_Select	0x1f
+#define ni_660x_max_gate_pin 7
+
+static inline unsigned int NI_660x_Gate_Pin_Gate_Select(unsigned int n)
+{
+	BUG_ON(n > ni_660x_max_gate_pin);
+	return 0x2 + n;
+}
+
+static inline unsigned int NI_660x_RTSI_Gate_Select(unsigned int n)
+{
+	BUG_ON(n > ni_660x_max_rtsi_channel);
+	return 0xb + n;
+}
+
+
+#define NI_M_Series_Timestamp_Mux_Gate_Select	0x0
+#define NI_M_Series_AI_START2_Gate_Select	0x12
+#define NI_M_Series_PXI_Star_Trigger_Gate_Select 0x13
+#define NI_M_Series_Next_Out_Gate_Select	0x14
+#define NI_M_Series_AI_START1_Gate_Select	0x1c
+#define NI_M_Series_Next_SRC_Gate_Select	0x1d
+#define NI_M_Series_Analog_Trigger_Out_Gate_Select 0x1e
+#define NI_M_Series_Logic_Low_Gate_Select	0x1f
+
+static inline unsigned int NI_M_Series_RTSI_Gate_Select(unsigned int n)
+{
+	BUG_ON(n > ni_m_series_max_rtsi_channel);
+	if (n == 7)
+		return 0x1b;
+	return 0xb + n;
+}
+
+static inline unsigned int NI_M_Series_PFI_Gate_Select(unsigned int n)
+{
+	BUG_ON(n > ni_m_series_max_pfi_channel);
+	if (n < 10)
+		return 1 + n;
+	return 0xb + n;
+}
+
+
+#define Gi_Source_Select_Shift 2
+#define Gi_Gate_Select_Shift 7
+
+#define Gi_Read_Acknowledges_Irq	0x1 /* not present on 660x */
+#define Gi_Write_Acknowledges_Irq	0x2 /* not present on 660x */
+#define Gi_Source_Select_Mask		0x7c
+#define Gi_Gate_Select_Mask		(0x1f << Gi_Gate_Select_Shift)
+#define Gi_Gate_Select_Load_Source_Bit	0x1000
+#define Gi_Or_Gate_Bit			0x2000
+#define Gi_Output_Polarity_Bit		0x4000 /* set to invert */
+#define Gi_Source_Polarity_Bit		0x8000 /* set to invert */
+
+#define Gi_Source_Select_Bits(x) (((x) << Gi_Source_Select_Shift) & \
+				  Gi_Source_Select_Mask)
+#define Gi_Gate_Select_Bits(x) (((x) << Gi_Gate_Select_Shift) & \
+				Gi_Gate_Select_Mask)
+
+#define Gi_Gating_Mode_Mask		0x3
+#define Gi_Gating_Disabled_Bits		0x0
+#define Gi_Level_Gating_Bits		0x1
+#define Gi_Rising_Edge_Gating_Bits	0x2
+#define Gi_Falling_Edge_Gating_Bits	0x3
+#define Gi_Gate_On_Both_Edges_Bit	0x4 /* used in conjunction with
+					       rising edge gating mode */
+#define Gi_Trigger_Mode_for_Edge_Gate_Mask 0x18
+#define Gi_Edge_Gate_Starts_Stops_Bits	0x0
+#define Gi_Edge_Gate_Stops_Starts_Bits	0x8
+#define Gi_Edge_Gate_Starts_Bits	0x10
+#define Gi_Edge_Gate_No_Starts_or_Stops_Bits 0x18
+#define Gi_Stop_Mode_Mask		0x60
+#define Gi_Stop_on_Gate_Bits		0x00
+#define Gi_Stop_on_Gate_or_TC_Bits	0x20
+#define Gi_Stop_on_Gate_or_Second_TC_Bits 0x40
+#define Gi_Load_Source_Select_Bit	0x80
+#define Gi_Output_Mode_Mask		0x300
+#define Gi_Output_TC_Pulse_Bits		0x100
+#define Gi_Output_TC_Toggle_Bits	0x200
+#define Gi_Output_TC_or_Gate_Toggle_Bits 0x300
+#define Gi_Counting_Once_Mask		0xc00
+#define Gi_No_Hardware_Disarm_Bits	0x000
+#define Gi_Disarm_at_TC_Bits		0x400
+#define Gi_Disarm_at_Gate_Bits		0x800
+#define Gi_Disarm_at_TC_or_Gate_Bits	0xc00
+#define Gi_Loading_On_TC_Bit		0x1000
+#define Gi_Gate_Polarity_Bit		0x2000
+#define Gi_Loading_On_Gate_Bit		0x4000
+#define Gi_Reload_Source_Switching_Bit	0x8000
+
+#define NI_660x_Source_Pin_i_Second_Gate_Select		0x0
+#define NI_660x_Up_Down_Pin_i_Second_Gate_Select	0x1
+#define NI_660x_Next_SRC_Second_Gate_Select		0xa
+#define NI_660x_Next_Out_Second_Gate_Select		0x14
+#define NI_660x_Selected_Gate_Second_Gate_Select	0x1e
+#define NI_660x_Logic_Low_Second_Gate_Select		0x1f
+
+#define ni_660x_max_up_down_pin		7
+
+static inline
+unsigned int NI_660x_Up_Down_Pin_Second_Gate_Select(unsigned int n)
+{
+	BUG_ON(n > ni_660x_max_up_down_pin);
+	return 0x2 + n;
+}
+static inline
+unsigned int NI_660x_RTSI_Second_Gate_Select(unsigned int n)
+{
+	BUG_ON(n > ni_660x_max_rtsi_channel);
+	return 0xb + n;
+}
+
+#define Gi_Second_Gate_Select_Shift	7
+
+/*FIXME: m-series has a second gate subselect bit */
+/*FIXME: m-series second gate sources are undocumented (by NI)*/
+#define Gi_Second_Gate_Mode_Bit		0x1
+#define Gi_Second_Gate_Select_Mask	(0x1f << Gi_Second_Gate_Select_Shift)
+#define Gi_Second_Gate_Polarity_Bit	0x2000
+#define Gi_Second_Gate_Subselect_Bit	0x4000 /* m-series only */
+#define Gi_Source_Subselect_Bit		0x8000 /* m-series only */
+
+static inline
+unsigned int Gi_Second_Gate_Select_Bits(unsigned int second_gate_select)
+{
+	return (second_gate_select << Gi_Second_Gate_Select_Shift) &
+		Gi_Second_Gate_Select_Mask;
+}
+
+#define G0_Save_Bit		0x1
+#define G1_Save_Bit		0x2
+#define G0_Counting_Bit		0x4
+#define G1_Counting_Bit		0x8
+#define G0_Next_Load_Source_Bit	0x10
+#define G1_Next_Load_Source_Bit	0x20
+#define G0_Stale_Data_Bit	0x40
+#define G1_Stale_Data_Bit	0x80
+#define G0_Armed_Bit		0x100
+#define G1_Armed_Bit		0x200
+#define G0_No_Load_Between_Gates_Bit 0x400
+#define G1_No_Load_Between_Gates_Bit 0x800
+#define G0_TC_Error_Bit		0x1000
+#define G1_TC_Error_Bit		0x2000
+#define G0_Gate_Error_Bit	0x4000
+#define G1_Gate_Error_Bit	0x8000
+
+static inline unsigned int Gi_Counting_Bit(unsigned int counter_index)
+{
+	if (counter_index % 2)
+		return G1_Counting_Bit;
+	return G0_Counting_Bit;
+}
+
+static inline unsigned int Gi_Armed_Bit(unsigned int counter_index)
+{
+	if (counter_index % 2)
+		return G1_Armed_Bit;
+	return G0_Armed_Bit;
+}
+
+static inline unsigned int Gi_Next_Load_Source_Bit(unsigned counter_index)
+{
+	if (counter_index % 2)
+		return G1_Next_Load_Source_Bit;
+	return G0_Next_Load_Source_Bit;
+}
+
+static inline unsigned int Gi_Stale_Data_Bit(unsigned int counter_index)
+{
+	if (counter_index % 2)
+		return G1_Stale_Data_Bit;
+	return G0_Stale_Data_Bit;
+}
+
+static inline unsigned int Gi_TC_Error_Bit(unsigned int counter_index)
+{
+	if (counter_index % 2)
+		return G1_TC_Error_Bit;
+	return G0_TC_Error_Bit;
+}
+
+static inline unsigned int Gi_Gate_Error_Bit(unsigned int counter_index)
+{
+	if (counter_index % 2)
+		return G1_Gate_Error_Bit;
+	return G0_Gate_Error_Bit;
+}
+
+/* Joint reset register bits */
+static inline unsigned Gi_Reset_Bit(unsigned int counter_index)
+{
+	return 0x1 << (2 + (counter_index % 2));
+}
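+/*
+ * Worked example (derived from the formula above): counter_index 0 or 2
+ * yields 0x1 << 2 = 0x4, counter_index 1 or 3 yields 0x1 << 3 = 0x8.
+ */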
+
+#define G0_Output_Bit		0x1
+#define G1_Output_Bit		0x2
+#define G0_HW_Save_Bit		0x1000
+#define G1_HW_Save_Bit		0x2000
+#define G0_Permanent_Stale_Bit	0x4000
+#define G1_Permanent_Stale_Bit	0x8000
+
+static inline unsigned int Gi_Permanent_Stale_Bit(unsigned int counter_index)
+{
+	if (counter_index % 2)
+		return G1_Permanent_Stale_Bit;
+	return G0_Permanent_Stale_Bit;
+}
+
+#define Gi_DMA_Enable_Bit	0x1
+#define Gi_DMA_Write_Bit	0x2
+#define Gi_DMA_Int_Bit		0x4
+
+#define Gi_DMA_Readbank_Bit	0x2000
+#define Gi_DRQ_Error_Bit	0x4000
+#define Gi_DRQ_Status_Bit	0x8000
+
+#define G0_Gate_Error_Confirm_Bit	0x20
+#define G0_TC_Error_Confirm_Bit		0x40
+
+#define G1_Gate_Error_Confirm_Bit	0x2
+#define G1_TC_Error_Confirm_Bit		0x4
+
+static inline unsigned int Gi_Gate_Error_Confirm_Bit(unsigned int counter_index)
+{
+	if (counter_index % 2)
+		return G1_Gate_Error_Confirm_Bit;
+	return G0_Gate_Error_Confirm_Bit;
+}
+
+static inline unsigned int Gi_TC_Error_Confirm_Bit(unsigned int counter_index)
+{
+	if (counter_index % 2)
+		return G1_TC_Error_Confirm_Bit;
+	return G0_TC_Error_Confirm_Bit;
+}
+
+/* Bits that are the same in G0/G2 and G1/G3 interrupt acknowledge registers */
+#define Gi_TC_Interrupt_Ack_Bit		0x4000
+#define Gi_Gate_Interrupt_Ack_Bit	0x8000
+
+#define Gi_Gate_Interrupt_Bit	0x4
+#define Gi_TC_Bit		0x8
+#define Gi_Interrupt_Bit	0x8000
+
+#define G0_TC_Interrupt_Enable_Bit	0x40
+#define G0_Gate_Interrupt_Enable_Bit	0x100
+
+#define G1_TC_Interrupt_Enable_Bit	0x200
+#define G1_Gate_Interrupt_Enable_Bit	0x400
+
+static inline unsigned int Gi_Gate_Interrupt_Enable_Bit(unsigned int counter_index)
+{
+	unsigned int bit;
+
+	if (counter_index % 2) {
+		bit = G1_Gate_Interrupt_Enable_Bit;
+	} else {
+		bit = G0_Gate_Interrupt_Enable_Bit;
+	}
+	return bit;
+}
+
+#define counter_status_mask (A4L_COUNTER_ARMED | A4L_COUNTER_COUNTING)
+
+#define NI_USUAL_PFI_SELECT(x)	(((x) < 10) ? (0x1 + (x)) : (0xb + (x)))
+#define NI_USUAL_RTSI_SELECT(x)	(((x) < 7) ? (0xb + (x)) : (0x1b + (x)))
+
+/* Mode bits for NI general-purpose counters, set with
+   INSN_CONFIG_SET_COUNTER_MODE */
+#define NI_GPCT_COUNTING_MODE_SHIFT		16
+#define NI_GPCT_INDEX_PHASE_BITSHIFT		20
+#define NI_GPCT_COUNTING_DIRECTION_SHIFT	24
+
+#define NI_GPCT_GATE_ON_BOTH_EDGES_BIT		0x4
+#define NI_GPCT_EDGE_GATE_MODE_MASK		0x18
+#define NI_GPCT_EDGE_GATE_STARTS_STOPS_BITS	0x0
+#define NI_GPCT_EDGE_GATE_STOPS_STARTS_BITS	0x8
+#define NI_GPCT_EDGE_GATE_STARTS_BITS		0x10
+#define NI_GPCT_EDGE_GATE_NO_STARTS_NO_STOPS_BITS 0x18
+#define NI_GPCT_STOP_MODE_MASK			0x60
+#define NI_GPCT_STOP_ON_GATE_BITS		0x00
+#define NI_GPCT_STOP_ON_GATE_OR_TC_BITS		0x20
+#define NI_GPCT_STOP_ON_GATE_OR_SECOND_TC_BITS	0x40
+#define NI_GPCT_LOAD_B_SELECT_BIT		0x80
+#define NI_GPCT_OUTPUT_MODE_MASK		0x300
+#define NI_GPCT_OUTPUT_TC_PULSE_BITS		0x100
+#define NI_GPCT_OUTPUT_TC_TOGGLE_BITS		0x200
+#define NI_GPCT_OUTPUT_TC_OR_GATE_TOGGLE_BITS	0x300
+#define NI_GPCT_HARDWARE_DISARM_MASK		0xc00
+#define NI_GPCT_NO_HARDWARE_DISARM_BITS		0x000
+#define NI_GPCT_DISARM_AT_TC_BITS		0x400
+#define NI_GPCT_DISARM_AT_GATE_BITS		0x800
+#define NI_GPCT_DISARM_AT_TC_OR_GATE_BITS	0xc00
+#define NI_GPCT_LOADING_ON_TC_BIT		0x1000
+#define NI_GPCT_LOADING_ON_GATE_BIT		0x4000
+#define NI_GPCT_COUNTING_MODE_MASK		(0x7 << NI_GPCT_COUNTING_MODE_SHIFT)
+#define NI_GPCT_COUNTING_MODE_NORMAL_BITS	(0x0 << NI_GPCT_COUNTING_MODE_SHIFT)
+#define NI_GPCT_COUNTING_MODE_QUADRATURE_X1_BITS (0x1 << NI_GPCT_COUNTING_MODE_SHIFT)
+#define NI_GPCT_COUNTING_MODE_QUADRATURE_X2_BITS (0x2 << NI_GPCT_COUNTING_MODE_SHIFT)
+#define NI_GPCT_COUNTING_MODE_QUADRATURE_X4_BITS (0x3 << NI_GPCT_COUNTING_MODE_SHIFT)
+#define NI_GPCT_COUNTING_MODE_TWO_PULSE_BITS	(0x4 << NI_GPCT_COUNTING_MODE_SHIFT)
+#define NI_GPCT_COUNTING_MODE_SYNC_SOURCE_BITS	(0x6 << NI_GPCT_COUNTING_MODE_SHIFT)
+#define NI_GPCT_INDEX_PHASE_MASK		(0x3 << NI_GPCT_INDEX_PHASE_BITSHIFT)
+#define NI_GPCT_INDEX_PHASE_LOW_A_LOW_B_BITS	(0x0 << NI_GPCT_INDEX_PHASE_BITSHIFT)
+#define NI_GPCT_INDEX_PHASE_LOW_A_HIGH_B_BITS	(0x1 << NI_GPCT_INDEX_PHASE_BITSHIFT)
+#define NI_GPCT_INDEX_PHASE_HIGH_A_LOW_B_BITS	(0x2 << NI_GPCT_INDEX_PHASE_BITSHIFT)
+#define NI_GPCT_INDEX_PHASE_HIGH_A_HIGH_B_BITS	(0x3 << NI_GPCT_INDEX_PHASE_BITSHIFT)
+#define NI_GPCT_INDEX_ENABLE_BIT		0x400000
+#define NI_GPCT_COUNTING_DIRECTION_MASK		(0x3 << NI_GPCT_COUNTING_DIRECTION_SHIFT)
+#define NI_GPCT_COUNTING_DIRECTION_DOWN_BITS	(0x00 << NI_GPCT_COUNTING_DIRECTION_SHIFT)
+#define NI_GPCT_COUNTING_DIRECTION_UP_BITS	(0x1 << NI_GPCT_COUNTING_DIRECTION_SHIFT)
+#define NI_GPCT_COUNTING_DIRECTION_HW_UP_DOWN_BITS (0x2 << NI_GPCT_COUNTING_DIRECTION_SHIFT)
+#define NI_GPCT_COUNTING_DIRECTION_HW_GATE_BITS (0x3 << NI_GPCT_COUNTING_DIRECTION_SHIFT)
+#define NI_GPCT_RELOAD_SOURCE_MASK		0xc000000
+#define NI_GPCT_RELOAD_SOURCE_FIXED_BITS	0x0
+#define NI_GPCT_RELOAD_SOURCE_SWITCHING_BITS	0x4000000
+#define NI_GPCT_RELOAD_SOURCE_GATE_SELECT_BITS	0x8000000
+#define NI_GPCT_OR_GATE_BIT			0x10000000
+#define NI_GPCT_INVERT_OUTPUT_BIT		0x20000000
+
+/* Bits for setting a clock source with INSN_CONFIG_SET_CLOCK_SRC when
+   using NI general-purpose counters. */
+#define NI_GPCT_CLOCK_SRC_SELECT_MASK		0x3f
+#define NI_GPCT_TIMEBASE_1_CLOCK_SRC_BITS	0x0
+#define NI_GPCT_TIMEBASE_2_CLOCK_SRC_BITS	0x1
+#define NI_GPCT_TIMEBASE_3_CLOCK_SRC_BITS	0x2
+#define NI_GPCT_LOGIC_LOW_CLOCK_SRC_BITS	0x3
+#define NI_GPCT_NEXT_GATE_CLOCK_SRC_BITS	0x4
+#define NI_GPCT_NEXT_TC_CLOCK_SRC_BITS		0x5
+#define NI_GPCT_SOURCE_PIN_i_CLOCK_SRC_BITS	0x6 /* NI 660x-specific */
+#define NI_GPCT_PXI10_CLOCK_SRC_BITS		0x7
+#define NI_GPCT_PXI_STAR_TRIGGER_CLOCK_SRC_BITS	0x8
+#define NI_GPCT_ANALOG_TRIGGER_OUT_CLOCK_SRC_BITS 0x9
+#define NI_GPCT_PRESCALE_MODE_CLOCK_SRC_MASK	0x30000000
+#define NI_GPCT_NO_PRESCALE_CLOCK_SRC_BITS	0x0
+#define NI_GPCT_PRESCALE_X2_CLOCK_SRC_BITS	0x10000000 /* divide source by 2 */
+#define NI_GPCT_PRESCALE_X8_CLOCK_SRC_BITS	0x20000000 /* divide source by 8 */
+#define NI_GPCT_INVERT_CLOCK_SRC_BIT		0x80000000
+#define NI_GPCT_SOURCE_PIN_CLOCK_SRC_BITS(x)	(0x10 + (x))
+#define NI_GPCT_RTSI_CLOCK_SRC_BITS(x)		(0x18 + (x))
+#define NI_GPCT_PFI_CLOCK_SRC_BITS(x)		(0x20 + (x))
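+
+/*
+ * Illustrative example (not from the original header): a caller selecting
+ * the 20MHz timebase divided by 2 would pass
+ *
+ *	NI_GPCT_TIMEBASE_1_CLOCK_SRC_BITS | NI_GPCT_PRESCALE_X2_CLOCK_SRC_BITS
+ *
+ * as the clock source argument of INSN_CONFIG_SET_CLOCK_SRC, since the
+ * select field and the prescale field occupy disjoint bits.
+ */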
+
+/* Possibilities for setting a gate source with
+   INSN_CONFIG_SET_GATE_SRC when using NI general-purpose counters.
+   May be bitwise-or'd with CR_EDGE or CR_INVERT. */
+/* M-series gates */
+#define NI_GPCT_TIMESTAMP_MUX_GATE_SELECT	0x0
+#define NI_GPCT_AI_START2_GATE_SELECT		0x12
+#define NI_GPCT_PXI_STAR_TRIGGER_GATE_SELECT	0x13
+#define NI_GPCT_NEXT_OUT_GATE_SELECT		0x14
+#define NI_GPCT_AI_START1_GATE_SELECT		0x1c
+#define NI_GPCT_NEXT_SOURCE_GATE_SELECT		0x1d
+#define NI_GPCT_ANALOG_TRIGGER_OUT_GATE_SELECT	0x1e
+#define NI_GPCT_LOGIC_LOW_GATE_SELECT		0x1f
+/* More gates for 660x */
+#define NI_GPCT_SOURCE_PIN_i_GATE_SELECT	0x100
+#define NI_GPCT_GATE_PIN_i_GATE_SELECT		0x101
+/* More gates for 660x "second gate" */
+#define NI_GPCT_UP_DOWN_PIN_i_GATE_SELECT	0x201
+#define NI_GPCT_SELECTED_GATE_GATE_SELECT	0x21e
+/* M-series "second gate" sources are unknown, we should add them here
+   with an offset of 0x300 when known. */
+#define NI_GPCT_DISABLED_GATE_SELECT		0x8000
+#define NI_GPCT_GATE_PIN_GATE_SELECT(x)		(0x102 + (x))
+#define NI_GPCT_RTSI_GATE_SELECT(x)		NI_USUAL_RTSI_SELECT(x)
+#define NI_GPCT_PFI_GATE_SELECT(x)		NI_USUAL_PFI_SELECT(x)
+#define NI_GPCT_UP_DOWN_PIN_GATE_SELECT(x)	(0x202 + (x))
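+
+/*
+ * Illustrative example (not from the original header): gating on an edge
+ * of PFI3 would combine the select value with the CR_EDGE flag mentioned
+ * above, e.g. NI_GPCT_PFI_GATE_SELECT(3) | CR_EDGE.
+ */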
+
+/* Possibilities for setting a source with INSN_CONFIG_SET_OTHER_SRC
+   when using NI general-purpose counters. */
+#define NI_GPCT_SOURCE_ENCODER_A 0
+#define NI_GPCT_SOURCE_ENCODER_B 1
+#define NI_GPCT_SOURCE_ENCODER_Z 2
+/* M-series gates */
+/* Still unknown, probably only need NI_GPCT_PFI_OTHER_SELECT */
+#define NI_GPCT_DISABLED_OTHER_SELECT	0x8000
+#define NI_GPCT_PFI_OTHER_SELECT(x) NI_USUAL_PFI_SELECT(x)
+
+/* Start sources for ni general-purpose counters for use with
+   INSN_CONFIG_ARM */
+#define NI_GPCT_ARM_IMMEDIATE		0x0
+/* Start both the counter and the adjacent paired counter
+   simultaneously */
+#define NI_GPCT_ARM_PAIRED_IMMEDIATE	0x1
+/* NI doesn't document bits for selecting hardware arm triggers.  If
+   the NI_GPCT_ARM_UNKNOWN bit is set, we will pass the least significant
+   bits (3 bits for 660x or 5 bits for m-series) through to the
+   hardware. This will at least allow someone to figure out what the bits
+   do later. */
+#define NI_GPCT_ARM_UNKNOWN		0x1000
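+/*
+ * Illustrative example (assumption, not from the original header):
+ * NI_GPCT_ARM_UNKNOWN | 0x4 arms the counter while passing the raw value
+ * 0x4 through to the hardware arm-select bits, as described above.
+ */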
+
+/* Digital filtering options for ni 660x for use with
+   INSN_CONFIG_FILTER. */
+#define NI_GPCT_FILTER_OFF		0x0
+#define NI_GPCT_FILTER_TIMEBASE_3_SYNC	0x1
+#define NI_GPCT_FILTER_100x_TIMEBASE_1	0x2
+#define NI_GPCT_FILTER_20x_TIMEBASE_1	0x3
+#define NI_GPCT_FILTER_10x_TIMEBASE_1	0x4
+#define NI_GPCT_FILTER_2x_TIMEBASE_1	0x5
+#define NI_GPCT_FILTER_2x_TIMEBASE_3	0x6
+
+/* Master clock sources for ni mio boards and
+   INSN_CONFIG_SET_CLOCK_SRC */
+#define NI_MIO_INTERNAL_CLOCK		0
+#define NI_MIO_RTSI_CLOCK		1
+/* Doesn't work for m-series, use NI_MIO_PLL_RTSI_CLOCK() instead;
+   the NI_MIO_PLL_* sources are m-series only */
+#define NI_MIO_PLL_PXI_STAR_TRIGGER_CLOCK 2
+#define NI_MIO_PLL_PXI10_CLOCK		3
+#define NI_MIO_PLL_RTSI0_CLOCK		4
+
+#define NI_MIO_PLL_RTSI_CLOCK(x) (NI_MIO_PLL_RTSI0_CLOCK + (x))
+
+/* Signals which can be routed to an NI RTSI pin with
+   INSN_CONFIG_SET_ROUTING. The numbers assigned are not arbitrary, they
+   correspond to the bits required to program the board. */
+#define NI_RTSI_OUTPUT_ADR_START1	0
+#define NI_RTSI_OUTPUT_ADR_START2	1
+#define NI_RTSI_OUTPUT_SCLKG		2
+#define NI_RTSI_OUTPUT_DACUPDN		3
+#define NI_RTSI_OUTPUT_DA_START1	4
+#define NI_RTSI_OUTPUT_G_SRC0		5
+#define NI_RTSI_OUTPUT_G_GATE0		6
+#define NI_RTSI_OUTPUT_RGOUT0		7
+#define NI_RTSI_OUTPUT_RTSI_BRD_0	8
+/* Pre-m-series always have RTSI clock on line 7 */
+#define NI_RTSI_OUTPUT_RTSI_OSC		12
+
+#define NI_RTSI_OUTPUT_RTSI_BRD(x) (NI_RTSI_OUTPUT_RTSI_BRD_0 + (x))
+
+
+int a4l_ni_tio_rinsn(struct ni_gpct *counter, struct a4l_kernel_instruction *insn);
+int a4l_ni_tio_winsn(struct ni_gpct *counter, struct a4l_kernel_instruction *insn);
+int a4l_ni_tio_insn_config(struct ni_gpct *counter, struct a4l_kernel_instruction *insn);
+void a4l_ni_tio_init_counter(struct ni_gpct *counter);
+
+struct ni_gpct_device *a4l_ni_gpct_device_construct(struct a4l_device * dev,
+	void (*write_register) (struct ni_gpct * counter, unsigned int bits,
+		enum ni_gpct_register reg),
+	unsigned int (*read_register) (struct ni_gpct * counter,
+		enum ni_gpct_register reg), enum ni_gpct_variant variant,
+	unsigned int num_counters);
+void a4l_ni_gpct_device_destroy(struct ni_gpct_device *counter_dev);
+
+#if (defined(CONFIG_XENO_DRIVERS_ANALOGY_NI_MITE) || \
+     defined(CONFIG_XENO_DRIVERS_ANALOGY_NI_MITE_MODULE))
+
+extern struct a4l_cmd_desc a4l_ni_tio_cmd_mask;
+
+int a4l_ni_tio_input_inttrig(struct ni_gpct *counter, lsampl_t trignum);
+int a4l_ni_tio_cmd(struct ni_gpct *counter, struct a4l_cmd_desc *cmd);
+int a4l_ni_tio_cmdtest(struct ni_gpct *counter, struct a4l_cmd_desc *cmd);
+int a4l_ni_tio_cancel(struct ni_gpct *counter);
+
+void a4l_ni_tio_handle_interrupt(struct ni_gpct *counter, struct a4l_device *dev);
+void a4l_ni_tio_set_mite_channel(struct ni_gpct *counter,
+			     struct mite_channel *mite_chan);
+void a4l_ni_tio_acknowledge_and_confirm(struct ni_gpct *counter,
+				    int *gate_error,
+				    int *tc_error,
+				    int *perm_stale_data, int *stale_data);
+
+#endif /* CONFIG_XENO_DRIVERS_ANALOGY_NI_MITE */
+
+#endif /* !__ANALOGY_NI_TIO_H__ */
+++ linux-patched/drivers/xenomai/analogy/Makefile	2022-03-21 12:58:31.071872560 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/drivers/xenomai/analogy/subdevice.c	1970-01-01 01:00:00.000000000 +0100
+
+ccflags-y += -I$(srctree)/drivers/xenomai/analogy
+
+obj-$(CONFIG_XENO_DRIVERS_ANALOGY) += xeno_analogy.o testing/ intel/ national_instruments/ sensoray/
+
+xeno_analogy-y := \
+	buffer.o \
+	command.o \
+	device.o \
+	driver.o \
+	driver_facilities.o \
+	instruction.o \
+	rtdm_helpers.o \
+	subdevice.o \
+	transfer.o \
+	rtdm_interface.o
+++ linux-patched/drivers/xenomai/analogy/subdevice.c	2022-03-21 12:58:31.064872628 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/drivers/xenomai/analogy/driver.c	1970-01-01 01:00:00.000000000 +0100
+/*
+ * Analogy for Linux, subdevice, channel and range related features
+ *
+ * Copyright (C) 1997-2000 David A. Schleef <ds@schleef.org>
+ * Copyright (C) 2008 Alexis Berlemont <alexis.berlemont@free.fr>
+ *
+ * Xenomai is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * Xenomai is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Xenomai; if not, write to the Free Software Foundation,
+ * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+#include <linux/module.h>
+#include <linux/ioport.h>
+#include <linux/mman.h>
+#include <asm/io.h>
+#include <asm/errno.h>
+#include <rtdm/analogy/device.h>
+
+/* --- Common ranges declarations --- */
+
+struct a4l_rngtab rng_bipolar10 = { 1, {
+		RANGE_V(-10, 10),
+	}};
+struct a4l_rngdesc a4l_range_bipolar10 = RNG_GLOBAL(rng_bipolar10);
+
+struct a4l_rngtab rng_bipolar5 = { 1, {
+		RANGE_V(-5, 5),
+	}};
+struct a4l_rngdesc a4l_range_bipolar5 = RNG_GLOBAL(rng_bipolar5);
+
+struct a4l_rngtab rng_unipolar10 = { 1, {
+		RANGE_V(0, 10),
+	}};
+struct a4l_rngdesc a4l_range_unipolar10 = RNG_GLOBAL(rng_unipolar10);
+
+struct a4l_rngtab rng_unipolar5 = { 1, {
+		RANGE_V(0, 5),
+	}};
+struct a4l_rngdesc a4l_range_unipolar5 = RNG_GLOBAL(rng_unipolar5);
+
+struct a4l_rngtab rng_unknown = { 1, {
+		RANGE(0, 1),
+	}};
+struct a4l_rngdesc a4l_range_unknown = RNG_GLOBAL(rng_unknown);
+
+struct a4l_rngtab rng_fake = { 0, {
+		RANGE(0, 0),
+	}};
+struct a4l_rngdesc a4l_range_fake = RNG_GLOBAL(rng_fake);
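+
+/*
+ * Drivers usually point a subdevice at one of these shared descriptors
+ * (illustrative sketch; the field name comes from struct a4l_subdevice as
+ * used below), e.g.:
+ *
+ *	subd->rng_desc = &a4l_range_bipolar10;
+ */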
+
+/* --- Basic channel / range management functions --- */
+
+struct a4l_channel *a4l_get_chfeat(struct a4l_subdevice *sb, int idx)
+{
+	int i = (sb->chan_desc->mode != A4L_CHAN_GLOBAL_CHANDESC) ? idx : 0;
+	return &(sb->chan_desc->chans[i]);
+}
+
+struct a4l_range *a4l_get_rngfeat(struct a4l_subdevice *sb, int chidx, int rngidx)
+{
+	int i = (sb->rng_desc->mode != A4L_RNG_GLOBAL_RNGDESC) ? chidx : 0;
+	return &(sb->rng_desc->rngtabs[i]->rngs[rngidx]);
+}
+
+int a4l_check_chanlist(struct a4l_subdevice *subd,
+		       unsigned char nb_chan, unsigned int *chans)
+{
+	int i, j;
+
+	if (nb_chan > subd->chan_desc->length)
+		return -EINVAL;
+
+	for (i = 0; i < nb_chan; i++) {
+		j = (subd->chan_desc->mode != A4L_CHAN_GLOBAL_CHANDESC) ? i : 0;
+
+		if (CR_CHAN(chans[i]) >= subd->chan_desc->length) {
+			__a4l_err("a4l_check_chanlist: "
+				  "chan idx out of range (%u>=%lu)\n",
+				  CR_CHAN(chans[i]), subd->chan_desc->length);
+			return -EINVAL;
+		}
+		if (CR_AREF(chans[i]) != 0 &&
+		    (CR_AREF(chans[i]) & subd->chan_desc->chans[j].flags) == 0)
+		{
+			__a4l_err("a4l_check_chanlist: "
+				  "bad channel type\n");
+			return -EINVAL;
+		}
+	}
+
+	if (subd->rng_desc == NULL)
+		return 0;
+
+	for (i = 0; i < nb_chan; i++) {
+		j = (subd->rng_desc->mode != A4L_RNG_GLOBAL_RNGDESC) ? i : 0;
+
+		if (CR_RNG(chans[i]) >= subd->rng_desc->rngtabs[j]->length) {
+			__a4l_err("a4l_check_chanlist: "
+				  "rng idx out of range (%u>=%u)\n",
+				  CR_RNG(chans[i]),
+				  subd->rng_desc->rngtabs[j]->length);
+			return -EINVAL;
+		}
+	}
+
+	return 0;
+}
+
+/* --- Upper layer functions --- */
+
+struct a4l_subdevice * a4l_alloc_subd(int sizeof_priv,
+			    void (*setup)(struct a4l_subdevice *))
+{
+	struct a4l_subdevice *subd;
+
+	subd = rtdm_malloc(sizeof(struct a4l_subdevice) + sizeof_priv);
+
+	if(subd != NULL) {
+		memset(subd, 0 , sizeof(struct a4l_subdevice) + sizeof_priv);
+		if(setup != NULL)
+			setup(subd);
+	}
+
+	return subd;
+}
+
+int a4l_add_subd(struct a4l_device * dev, struct a4l_subdevice * subd)
+{
+	struct list_head *this;
+	int i = 0;
+
+	/* Basic checking */
+	if (dev == NULL || subd == NULL)
+		return -EINVAL;
+
+	list_add_tail(&subd->list, &dev->subdvsq);
+
+	subd->dev = dev;
+
+	list_for_each(this, &dev->subdvsq) {
+		i++;
+	}
+
+	subd->idx = --i;
+
+	return i;
+}
+
+struct a4l_subdevice *a4l_get_subd(struct a4l_device *dev, int idx)
+{
+	int i = 0;
+	struct a4l_subdevice *subd = NULL;
+	struct list_head *this;
+
+	/* This function is not optimized as we do not go through the
+	   transfer structure */
+
+	list_for_each(this, &dev->subdvsq) {
+		if(idx == i++)
+			subd = list_entry(this, struct a4l_subdevice, list);
+	}
+
+	return subd;
+}
+
+/* --- IOCTL / FOPS functions --- */
+
+int a4l_ioctl_subdinfo(struct a4l_device_context * cxt, void *arg)
+{
+	struct rtdm_fd *fd = rtdm_private_to_fd(cxt);
+	struct a4l_device *dev = a4l_get_dev(cxt);
+	int i, ret = 0;
+	a4l_sbinfo_t *subd_info;
+
+	/* Basic checking */
+	if (!test_bit(A4L_DEV_ATTACHED_NR, &dev->flags)) {
+		__a4l_err("a4l_ioctl_subdinfo: unattached device\n");
+		return -EINVAL;
+	}
+
+	subd_info = rtdm_malloc(dev->transfer.nb_subd *
+				sizeof(a4l_sbinfo_t));
+	if (subd_info == NULL)
+		return -ENOMEM;
+
+	for (i = 0; i < dev->transfer.nb_subd; i++) {
+		subd_info[i].flags = dev->transfer.subds[i]->flags;
+		subd_info[i].status = dev->transfer.subds[i]->status;
+		subd_info[i].nb_chan =
+			(dev->transfer.subds[i]->chan_desc != NULL) ?
+			dev->transfer.subds[i]->chan_desc->length : 0;
+	}
+
+	if (rtdm_safe_copy_to_user(fd,
+				   arg,
+				   subd_info, dev->transfer.nb_subd *
+				   sizeof(a4l_sbinfo_t)) != 0)
+		ret = -EFAULT;
+
+	rtdm_free(subd_info);
+
+	return ret;
+
+}
+
+int a4l_ioctl_nbchaninfo(struct a4l_device_context * cxt, void *arg)
+{
+	struct rtdm_fd *fd = rtdm_private_to_fd(cxt);
+	struct a4l_device *dev = a4l_get_dev(cxt);
+	a4l_chinfo_arg_t inarg;
+
+	/* Basic checking */
+	if (!test_bit(A4L_DEV_ATTACHED_NR, &dev->flags)) {
+		__a4l_err("a4l_ioctl_nbchaninfo: unattached device\n");
+		return -EINVAL;
+	}
+
+	if (rtdm_safe_copy_from_user(fd,
+				     &inarg, arg,
+				     sizeof(a4l_chinfo_arg_t)) != 0)
+		return -EFAULT;
+
+	if (inarg.idx_subd >= dev->transfer.nb_subd) {
+		__a4l_err("a4l_ioctl_nbchaninfo: subdevice index "
+			  "out of range\n");
+		return -EINVAL;
+	}
+
+	if(dev->transfer.subds[inarg.idx_subd]->chan_desc == NULL)
+		inarg.info = (void *)0;
+	else
+		inarg.info = (void *)(unsigned long)
+			dev->transfer.subds[inarg.idx_subd]->chan_desc->length;
+
+	if (rtdm_safe_copy_to_user(fd,
+				   arg,
+				   &inarg, sizeof(a4l_chinfo_arg_t)) != 0)
+		return -EFAULT;
+
+	return 0;
+}
+
+int a4l_ioctl_chaninfo(struct a4l_device_context * cxt, void *arg)
+{
+	struct rtdm_fd *fd = rtdm_private_to_fd(cxt);
+	int i, ret = 0;
+	struct a4l_device *dev = a4l_get_dev(cxt);
+	a4l_chinfo_t *chan_info;
+	a4l_chinfo_arg_t inarg;
+	struct a4l_channels_desc *chan_desc;
+	struct a4l_rngdesc *rng_desc;
+
+	/* Basic checking */
+	if (!test_bit(A4L_DEV_ATTACHED_NR, &dev->flags)) {
+		__a4l_err("a4l_ioctl_chaninfo: unattached device\n");
+		return -EINVAL;
+	}
+
+	if (rtdm_safe_copy_from_user(fd,
+				     &inarg, arg,
+				     sizeof(a4l_chinfo_arg_t)) != 0)
+		return -EFAULT;
+
+	if (inarg.idx_subd >= dev->transfer.nb_subd) {
+		__a4l_err("a4l_ioctl_chaninfo: bad subdevice index\n");
+		return -EINVAL;
+	}
+
+	chan_desc = dev->transfer.subds[inarg.idx_subd]->chan_desc;
+	rng_desc = dev->transfer.subds[inarg.idx_subd]->rng_desc;
+
+	if (chan_desc == NULL) {
+		__a4l_err("a4l_ioctl_chaninfo: no channel descriptor "
+			  "for subdevice %d\n", inarg.idx_subd);
+		return -EINVAL;
+	}
+
+	if(rng_desc == NULL)
+		rng_desc = &a4l_range_fake;
+
+	chan_info = rtdm_malloc(chan_desc->length * sizeof(a4l_chinfo_t));
+	if (chan_info == NULL)
+		return -ENOMEM;
+
+	/* If the channel descriptor is global, the fields are filled
+	   with the same instance of channel descriptor */
+	for (i = 0; i < chan_desc->length; i++) {
+		int j =
+			(chan_desc->mode != A4L_CHAN_GLOBAL_CHANDESC) ? i : 0;
+		int k = (rng_desc->mode != A4L_RNG_GLOBAL_RNGDESC) ? i : 0;
+
+		chan_info[i].chan_flags = chan_desc->chans[j].flags;
+		chan_info[i].nb_bits = chan_desc->chans[j].nb_bits;
+		chan_info[i].nb_rng = rng_desc->rngtabs[k]->length;
+
+		if (chan_desc->mode == A4L_CHAN_GLOBAL_CHANDESC)
+			chan_info[i].chan_flags |= A4L_CHAN_GLOBAL;
+	}
+
+	if (rtdm_safe_copy_to_user(fd,
+				   inarg.info,
+				   chan_info,
+				   chan_desc->length *
+				   sizeof(a4l_chinfo_t)) != 0)
+		ret = -EFAULT;
+
+	rtdm_free(chan_info);
+
+	return ret;
+}
+
+int a4l_ioctl_nbrnginfo(struct a4l_device_context * cxt, void *arg)
+{
+	int i;
+	struct rtdm_fd *fd = rtdm_private_to_fd(cxt);
+	struct a4l_device *dev = a4l_get_dev(cxt);
+	a4l_rnginfo_arg_t inarg;
+	struct a4l_rngdesc *rng_desc;
+
+	/* Basic checking */
+	if (!test_bit(A4L_DEV_ATTACHED_NR, &dev->flags)) {
+		__a4l_err("a4l_ioctl_nbrnginfo: unattached device\n");
+		return -EINVAL;
+	}
+
+	if (rtdm_safe_copy_from_user(fd,
+				     &inarg,
+				     arg, sizeof(a4l_rnginfo_arg_t)) != 0)
+		return -EFAULT;
+
+	if (inarg.idx_subd >= dev->transfer.nb_subd) {
+		__a4l_err("a4l_ioctl_nbrnginfo: bad subdevice index\n");
+		return -EINVAL;
+	}
+
+	if (dev->transfer.subds[inarg.idx_subd]->chan_desc == NULL) {
+		__a4l_err("a4l_ioctl_nbrnginfo: no channel descriptor "
+			  "for subdevice %d\n", inarg.idx_subd);
+		return -EINVAL;
+	}
+
+	if (inarg.idx_chan >=
+	    dev->transfer.subds[inarg.idx_subd]->chan_desc->length) {
+		__a4l_err("a4l_ioctl_nbrnginfo: bad channel index\n");
+		return -EINVAL;
+	}
+
+	rng_desc = dev->transfer.subds[inarg.idx_subd]->rng_desc;
+	if (rng_desc != NULL) {
+		i = (rng_desc->mode != A4L_RNG_GLOBAL_RNGDESC) ?
+			inarg.idx_chan : 0;
+		inarg.info = (void *)(unsigned long)
+			rng_desc->rngtabs[i]->length;
+	} else
+		inarg.info = (void *)0;
+
+
+	if (rtdm_safe_copy_to_user(fd,
+				   arg,
+				   &inarg, sizeof(a4l_rnginfo_arg_t)) != 0)
+		return -EFAULT;
+
+	return 0;
+}
+
+int a4l_ioctl_rnginfo(struct a4l_device_context * cxt, void *arg)
+{
+	struct rtdm_fd *fd = rtdm_private_to_fd(cxt);
+	int i, ret = 0;
+	unsigned int tmp;
+	struct a4l_device *dev = a4l_get_dev(cxt);
+	struct a4l_rngdesc *rng_desc;
+	a4l_rnginfo_t *rng_info;
+	a4l_rnginfo_arg_t inarg;
+
+	/* Basic checking */
+	if (!test_bit(A4L_DEV_ATTACHED_NR, &dev->flags)) {
+		__a4l_err("a4l_ioctl_rnginfo: unattached device\n");
+		return -EINVAL;
+	}
+
+	if (rtdm_safe_copy_from_user(fd,
+				     &inarg,
+				     arg, sizeof(a4l_rnginfo_arg_t)) != 0)
+		return -EFAULT;
+
+	if (inarg.idx_subd >= dev->transfer.nb_subd) {
+		__a4l_err("a4l_ioctl_rnginfo: bad subdevice index\n");
+		return -EINVAL;
+	}
+
+	if (dev->transfer.subds[inarg.idx_subd]->chan_desc == NULL) {
+		__a4l_err("a4l_ioctl_rnginfo: no channel descriptor "
+			  "for subdevice %d\n", inarg.idx_subd);
+		return -EINVAL;
+	}
+
+	if (inarg.idx_chan >=
+	    dev->transfer.subds[inarg.idx_subd]->chan_desc->length) {
+		__a4l_err("a4l_ioctl_rnginfo: bad channel index\n");
+		return -EINVAL;
+	}
+
+	rng_desc = dev->transfer.subds[inarg.idx_subd]->rng_desc;
+	if (rng_desc == NULL) {
+		__a4l_err("a4l_ioctl_rnginfo: no range descriptor "
+			  "for channel %d\n", inarg.idx_chan);
+		return -EINVAL;
+	}
+
+	/* If the range descriptor is global,
+	   we take the first instance */
+	tmp = (rng_desc->mode != A4L_RNG_GLOBAL_RNGDESC) ?
+		inarg.idx_chan : 0;
+
+	rng_info = rtdm_malloc(rng_desc->rngtabs[tmp]->length *
+			       sizeof(a4l_rnginfo_t));
+	if (rng_info == NULL)
+		return -ENOMEM;
+
+	for (i = 0; i < rng_desc->rngtabs[tmp]->length; i++) {
+		rng_info[i].min = rng_desc->rngtabs[tmp]->rngs[i].min;
+		rng_info[i].max = rng_desc->rngtabs[tmp]->rngs[i].max;
+		rng_info[i].flags = rng_desc->rngtabs[tmp]->rngs[i].flags;
+
+		if (rng_desc->mode == A4L_RNG_GLOBAL_RNGDESC)
+			rng_info[i].flags |= A4L_RNG_GLOBAL;
+	}
+
+	if (rtdm_safe_copy_to_user(fd,
+				   inarg.info,
+				   rng_info,
+				   rng_desc->rngtabs[tmp]->length *
+				   sizeof(a4l_rnginfo_t)) != 0)
+		ret = -EFAULT;
+
+	rtdm_free(rng_info);
+
+	return ret;
+}
+++ linux-patched/drivers/xenomai/analogy/driver.c	2022-03-21 12:58:31.057872696 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/drivers/xenomai/analogy/device.c	1970-01-01 01:00:00.000000000 +0100
+/*
+ * Analogy for Linux, driver related features
+ *
+ * Copyright (C) 1997-2000 David A. Schleef <ds@schleef.org>
+ * Copyright (C) 2008 Alexis Berlemont <alexis.berlemont@free.fr>
+ *
+ * Xenomai is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * Xenomai is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Xenomai; if not, write to the Free Software Foundation,
+ * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+#include <linux/module.h>
+#include <linux/fs.h>
+#include <rtdm/analogy/device.h>
+
+#include "proc.h"
+
+static LIST_HEAD(a4l_drvs);
+
+/* --- Driver list management functions --- */
+
+int a4l_lct_drv(char *pin, struct a4l_driver ** pio)
+{
+	struct list_head *this;
+	int ret = -EINVAL;
+
+	__a4l_dbg(1, core_dbg, "name=%s\n", pin);
+
+	/* Goes through the linked list so as to find
+	   a driver instance with the same name */
+	list_for_each(this, &a4l_drvs) {
+		struct a4l_driver *drv = list_entry(this, struct a4l_driver, list);
+
+		if (strcmp(drv->board_name, pin) == 0) {
+			/* The argument pio can be NULL
+			   if there is no need to retrieve the pointer */
+			if (pio != NULL)
+				*pio = drv;
+			ret = 0;
+			break;
+		}
+	}
+
+	return ret;
+}
+
+int a4l_register_drv(struct a4l_driver * drv)
+{
+	if (!rtdm_available())
+		return -ENOSYS;
+
+	__a4l_dbg(1, core_dbg, "board name=%s\n", drv->board_name);
+
+	if (a4l_lct_drv(drv->board_name, NULL) != 0) {
+		list_add(&drv->list, &a4l_drvs);
+		return 0;
+	} else
+		return -EINVAL;
+}
+
+int a4l_unregister_drv(struct a4l_driver * drv)
+{
+	__a4l_dbg(1, core_dbg, "board name=%s\n", drv->board_name);
+
+	if (a4l_lct_drv(drv->board_name, NULL) == 0) {
+		/* Here, we assume the argument points to a real
+		   driver struct (not a blank structure with only
+		   the name field properly set) */
+		list_del(&drv->list);
+		return 0;
+	} else
+		return -EINVAL;
+}
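+
+/*
+ * Typical usage (illustrative sketch, not from the original file): a board
+ * driver fills a struct a4l_driver (board_name, owner, attach/detach
+ * callbacks, privdata_size) and calls a4l_register_drv() from its module
+ * init code, then a4l_unregister_drv() from its exit code.
+ */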
+
+#ifdef CONFIG_PROC_FS
+
+/* --- Driver list proc section --- */
+
+int a4l_rdproc_drvs(struct seq_file *p, void *data)
+{
+	int i = 0;
+	struct list_head *this;
+
+	seq_printf(p, "--  Analogy drivers --\n\n");
+
+	seq_printf(p, "| idx | board name \n");
+
+	list_for_each(this, &a4l_drvs) {
+		struct a4l_driver *drv = list_entry(this, struct a4l_driver, list);
+		seq_printf(p, "|  %02d | %s \n", i++, drv->board_name);
+	}
+	return 0;
+}
+
+#endif /* CONFIG_PROC_FS */
+++ linux-patched/drivers/xenomai/analogy/device.c	2022-03-21 12:58:31.049872774 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/drivers/xenomai/analogy/proc.h	1970-01-01 01:00:00.000000000 +0100
+/*
+ * Analogy for Linux, device related features
+ *
+ * Copyright (C) 1997-2000 David A. Schleef <ds@schleef.org>
+ * Copyright (C) 2008 Alexis Berlemont <alexis.berlemont@free.fr>
+ *
+ * Xenomai is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * Xenomai is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Xenomai; if not, write to the Free Software Foundation,
+ * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+#include <linux/module.h>
+#include <linux/fs.h>
+#include <linux/proc_fs.h>
+#include <linux/string.h>
+#include <rtdm/analogy/device.h>
+
+#include "proc.h"
+
+static struct a4l_device a4l_devs[A4L_NB_DEVICES];
+
+/* --- Device tab management functions --- */
+
+void a4l_init_devs(void)
+{
+	int i;
+	memset(a4l_devs, 0, A4L_NB_DEVICES * sizeof(struct a4l_device));
+	for (i = 0; i < A4L_NB_DEVICES; i++) {
+		rtdm_lock_init(&a4l_devs[i].lock);
+		a4l_devs[i].transfer.irq_desc.irq = A4L_IRQ_UNUSED;
+	}
+}
+
+int a4l_check_cleanup_devs(void)
+{
+	int i, ret = 0;
+
+	for (i = 0; i < A4L_NB_DEVICES && ret == 0; i++)
+		if (test_bit(A4L_DEV_ATTACHED_NR, &a4l_devs[i].flags))
+			ret = -EBUSY;
+
+	return ret;
+}
+
+void a4l_set_dev(struct a4l_device_context *cxt)
+{
+	/* Retrieve the minor index */
+	const int minor = a4l_get_minor(cxt);
+	/* Fill the dev fields accordingly */
+	cxt->dev = &(a4l_devs[minor]);
+}
+
+/* --- Device tab proc section --- */
+
+#ifdef CONFIG_PROC_FS
+
+int a4l_rdproc_devs(struct seq_file *p, void *data)
+{
+	int i;
+
+	seq_printf(p, "--  Analogy devices --\n\n");
+	seq_printf(p, "| idx | status | driver\n");
+
+	for (i = 0; i < A4L_NB_DEVICES; i++) {
+		char *status, *name;
+
+		/* Gets the device's state */
+		if (a4l_devs[i].flags == 0) {
+			status = "Unused";
+			name = "No driver";
+		} else if (test_bit(A4L_DEV_ATTACHED_NR, &a4l_devs[i].flags)) {
+			status = "Linked";
+			name = a4l_devs[i].driver->driver_name;
+		} else {
+			status = "Broken";
+			name = "Unknown";
+		}
+
+		seq_printf(p, "|  %02d | %s | %s\n", i, status, name);
+	}
+	return 0;
+}
+
+static int a4l_proc_transfer_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, a4l_rdproc_transfer, PDE_DATA(inode));
+}
+
+static const DEFINE_PROC_OPS(a4l_proc_transfer_ops,
+			a4l_proc_transfer_open,
+			single_release,
+			seq_read,
+			NULL);
+
+int a4l_proc_attach(struct a4l_device_context * cxt)
+{
+	int ret = 0;
+	struct a4l_device *dev = a4l_get_dev(cxt);
+	struct proc_dir_entry *entry;
+	char *entry_name;
+
+	/* Allocate the buffer for the file name */
+	entry_name = rtdm_malloc(A4L_NAMELEN + 4);
+	if (entry_name == NULL) {
+		__a4l_err("a4l_proc_attach: failed to allocate buffer\n");
+		return -ENOMEM;
+	}
+
+	/* Create the proc file name */
+	ksformat(entry_name, A4L_NAMELEN + 4, "%02d-%s",
+		 a4l_get_minor(cxt), dev->driver->board_name);
+
+	/* Create the proc entry */
+	entry = proc_create_data(entry_name, 0444, a4l_proc_root,
+				 &a4l_proc_transfer_ops, &dev->transfer);
+	if (entry == NULL) {
+		__a4l_err("a4l_proc_attach: "
+			  "failed to create /proc/analogy/%s\n",
+			  entry_name);
+		ret = -ENOMEM;
+	}
+
+	rtdm_free(entry_name);
+
+	return ret;
+}
+
+void a4l_proc_detach(struct a4l_device_context * cxt)
+{
+	struct a4l_device *dev = a4l_get_dev(cxt);
+	char *entry_name;
+
+	entry_name = rtdm_malloc(A4L_NAMELEN + 4);
+	if (entry_name == NULL) {
+		__a4l_err("a4l_proc_detach: "
+			  "failed to allocate filename buffer\n");
+		return;
+	}
+
+	ksformat(entry_name, A4L_NAMELEN + 4, "%02d-%s",
+		 a4l_get_minor(cxt), dev->driver->board_name);
+
+	remove_proc_entry(entry_name, a4l_proc_root);
+
+	rtdm_free(entry_name);
+}
+
+#else /* !CONFIG_PROC_FS */
+
+int a4l_proc_attach(struct a4l_device_context * cxt)
+{
+	return 0;
+}
+
+void a4l_proc_detach(struct a4l_device_context * cxt)
+{
+}
+
+#endif /* CONFIG_PROC_FS */
+
+/* --- Attach / detach section --- */
+
+int a4l_fill_lnkdesc(struct a4l_device_context * cxt,
+		     a4l_lnkdesc_t * link_arg, void *arg)
+{
+	struct rtdm_fd *fd = rtdm_private_to_fd(cxt);
+	int ret;
+	char *tmpname = NULL;
+	void *tmpopts = NULL;
+
+	ret = rtdm_safe_copy_from_user(fd,
+				       link_arg, arg, sizeof(a4l_lnkdesc_t));
+	if (ret != 0) {
+		__a4l_err("a4l_fill_lnkdesc: "
+			  "call1(copy_from_user) failed\n");
+		goto out_get_lnkdesc;
+	}
+
+	if (link_arg->bname_size != 0 && link_arg->bname != NULL) {
+		tmpname = rtdm_malloc(link_arg->bname_size + 1);
+		if (tmpname == NULL) {
+			__a4l_err("a4l_fill_lnkdesc: "
+				  "call1(alloc) failed\n");
+			ret = -ENOMEM;
+			goto out_get_lnkdesc;
+		}
+		tmpname[link_arg->bname_size] = 0;
+
+		ret = rtdm_safe_copy_from_user(fd,
+					       tmpname,
+					       link_arg->bname,
+					       link_arg->bname_size);
+		if (ret != 0) {
+			__a4l_err("a4l_fill_lnkdesc: "
+				  "call2(copy_from_user) failed\n");
+			goto out_get_lnkdesc;
+		}
+	} else {
+		__a4l_err("a4l_fill_lnkdesc: board name missing\n");
+		ret = -EINVAL;
+		goto out_get_lnkdesc;
+	}
+
+	if (link_arg->opts_size != 0 && link_arg->opts != NULL) {
+		tmpopts = rtdm_malloc(link_arg->opts_size);
+
+		if (tmpopts == NULL) {
+			__a4l_err("a4l_fill_lnkdesc: "
+				  "call2(alloc) failed\n");
+			ret = -ENOMEM;
+			goto out_get_lnkdesc;
+		}
+
+		ret = rtdm_safe_copy_from_user(fd,
+					       tmpopts,
+					       link_arg->opts,
+					       link_arg->opts_size);
+		if (ret != 0) {
+			__a4l_err("a4l_fill_lnkdesc: "
+				  "call3(copy_from_user) failed\n");
+			goto out_get_lnkdesc;
+		}
+	}
+
+	link_arg->bname = tmpname;
+	link_arg->opts = tmpopts;
+
+      out_get_lnkdesc:
+
+	if (tmpname == NULL) {
+		link_arg->bname = NULL;
+		link_arg->bname_size = 0;
+	}
+
+	if (tmpopts == NULL) {
+		link_arg->opts = NULL;
+		link_arg->opts_size = 0;
+	}
+
+	return ret;
+}
+
+void a4l_free_lnkdesc(struct a4l_device_context * cxt, a4l_lnkdesc_t * link_arg)
+{
+	if (link_arg->bname != NULL)
+		rtdm_free(link_arg->bname);
+
+	if (link_arg->opts != NULL)
+		rtdm_free(link_arg->opts);
+}
+
+int a4l_assign_driver(struct a4l_device_context * cxt,
+			 struct a4l_driver * drv, a4l_lnkdesc_t * link_arg)
+{
+	int ret = 0;
+	struct a4l_device *dev = a4l_get_dev(cxt);
+
+	dev->driver = drv;
+	INIT_LIST_HEAD(&dev->subdvsq);
+
+	if (drv->privdata_size == 0)
+		__a4l_dbg(1, core_dbg, " warning! "
+				       "the field priv will not be usable\n");
+	else {
+		dev->priv = rtdm_malloc(drv->privdata_size);
+		if (dev->priv == NULL) {
+			__a4l_err("a4l_assign_driver: "
+				  "call(alloc) failed\n");
+			ret = -ENOMEM;
+			goto out_assign_driver;
+		}
+
+		/* Initialize the private data even if it is not our
+		   role (the driver should do it); this may prevent
+		   hard-to-find bugs */
+		memset(dev->priv, 0, drv->privdata_size);
+	}
+
+	if ((ret = drv->attach(dev, link_arg)) != 0)
+		__a4l_err("a4l_assign_driver: "
+			  "call(drv->attach) failed (ret=%d)\n",
+		     ret);
+
+out_assign_driver:
+
+	/* Increments module's count */
+	if (ret == 0 && (!try_module_get(drv->owner))) {
+		__a4l_err("a4l_assign_driver: "
+			  "driver's owner field wrongly set\n");
+		ret = -ENODEV;
+	}
+
+	if (ret != 0 && dev->priv != NULL) {
+		rtdm_free(dev->priv);
+		dev->driver = NULL;
+	}
+
+	return ret;
+}
+
+int a4l_release_driver(struct a4l_device_context * cxt)
+{
+	struct a4l_device *dev = a4l_get_dev(cxt);
+	struct a4l_subdevice *subd, *tmp;
+	int ret = 0;
+
+	if ((ret = dev->driver->detach(dev)) != 0)
+		goto out_release_driver;
+
+	module_put(dev->driver->owner);
+
+	/* In case the driver developer did not free the subdevices */
+	if (!list_empty(&dev->subdvsq))
+		list_for_each_entry_safe(subd, tmp, &dev->subdvsq, list) {
+			list_del(&subd->list);
+			rtdm_free(subd);
+		}
+
+	/* Free the private field */
+	if (dev->priv)
+		rtdm_free(dev->priv);
+
+	dev->driver = NULL;
+
+out_release_driver:
+	return ret;
+}
+
+int a4l_device_attach(struct a4l_device_context * cxt, void *arg)
+{
+	int ret = 0;
+	a4l_lnkdesc_t link_arg;
+	struct a4l_driver *drv = NULL;
+
+	if ((ret = a4l_fill_lnkdesc(cxt, &link_arg, arg)) != 0)
+		goto out_attach;
+
+	if ((ret = a4l_lct_drv(link_arg.bname, &drv)) != 0) {
+		__a4l_err("a4l_device_attach: "
+			  "cannot find board name %s\n", link_arg.bname);
+		goto out_attach;
+	}
+
+	if ((ret = a4l_assign_driver(cxt, drv, &link_arg)) != 0)
+		goto out_attach;
+
+      out_attach:
+	a4l_free_lnkdesc(cxt, &link_arg);
+	return ret;
+}
+
+int a4l_device_detach(struct a4l_device_context * cxt)
+{
+	struct a4l_device *dev = a4l_get_dev(cxt);
+
+	if (dev->driver == NULL) {
+		__a4l_err("a4l_device_detach: "
+			  "incoherent state, driver not reachable\n");
+		return -ENXIO;
+	}
+
+	return a4l_release_driver(cxt);
+}
+
+/* --- IOCTL / FOPS functions --- */
+
+int a4l_ioctl_devcfg(struct a4l_device_context * cxt, void *arg)
+{
+	int ret = 0;
+
+	if (rtdm_in_rt_context())
+		return -ENOSYS;
+
+	if (arg == NULL) {
+		/* Basic checking */
+		if (!test_bit(A4L_DEV_ATTACHED_NR, &(a4l_get_dev(cxt)->flags))) {
+			__a4l_err("a4l_ioctl_devcfg: "
+				  "free device, no driver to detach\n");
+			return -EINVAL;
+		}
+		/* Pre-cleanup of the transfer structure: make sure
+		   that nothing is busy */
+		if ((ret = a4l_precleanup_transfer(cxt)) != 0)
+			return ret;
+		/* Remove the related proc file */
+		a4l_proc_detach(cxt);
+		/* Free the device and the driver from each other */
+		if ((ret = a4l_device_detach(cxt)) == 0)
+			clear_bit(A4L_DEV_ATTACHED_NR,
+				  &(a4l_get_dev(cxt)->flags));
+		/* Free the transfer structure and its related data */
+		if ((ret = a4l_cleanup_transfer(cxt)) != 0)
+			return ret;
+	} else {
+		/* Basic checking */
+		if (test_bit
+		    (A4L_DEV_ATTACHED_NR, &(a4l_get_dev(cxt)->flags))) {
+			__a4l_err("a4l_ioctl_devcfg: "
+				  "linked device, cannot attach more driver\n");
+			return -EINVAL;
+		}
+		/* Pre-initialization of the transfer structure */
+		a4l_presetup_transfer(cxt);
+		/* Link the device with the driver */
+		if ((ret = a4l_device_attach(cxt, arg)) != 0)
+			return ret;
+		/* Create the transfer structure and
+		   the related proc file */
+		if ((ret = a4l_setup_transfer(cxt)) != 0 ||
+		    (ret = a4l_proc_attach(cxt)) != 0)
+			a4l_device_detach(cxt);
+		else
+			set_bit(A4L_DEV_ATTACHED_NR,
+				&(a4l_get_dev(cxt)->flags));
+	}
+
+	return ret;
+}
+
+int a4l_ioctl_devinfo(struct a4l_device_context * cxt, void *arg)
+{
+	struct rtdm_fd *fd = rtdm_private_to_fd(cxt);
+	a4l_dvinfo_t info;
+	struct a4l_device *dev = a4l_get_dev(cxt);
+
+	memset(&info, 0, sizeof(a4l_dvinfo_t));
+
+	if (test_bit(A4L_DEV_ATTACHED_NR, &dev->flags)) {
+		int len = (strlen(dev->driver->board_name) > A4L_NAMELEN) ?
+		    A4L_NAMELEN : strlen(dev->driver->board_name);
+
+		memcpy(info.board_name, dev->driver->board_name, len);
+
+		len = (strlen(dev->driver->driver_name) > A4L_NAMELEN) ?
+		    A4L_NAMELEN : strlen(dev->driver->driver_name);
+
+		memcpy(info.driver_name, dev->driver->driver_name, len);
+
+		info.nb_subd = dev->transfer.nb_subd;
+		/* TODO: for API compatibility reasons, find the first
+		   read subdevice and write subdevice */
+	}
+
+	if (rtdm_safe_copy_to_user(fd,
+				   arg, &info, sizeof(a4l_dvinfo_t)) != 0)
+		return -EFAULT;
+
+	return 0;
+}
+++ linux-patched/drivers/xenomai/analogy/proc.h	2022-03-21 12:58:31.042872842 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/drivers/xenomai/analogy/command.c	1970-01-01 01:00:00.000000000 +0100
+/*
+ * Analogy for Linux, procfs related features
+ *
+ * Copyright (C) 1997-2000 David A. Schleef <ds@schleef.org>
+ * Copyright (C) 2008 Alexis Berlemont <alexis.berlemont@free.fr>
+ *
+ * Xenomai is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * Xenomai is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Xenomai; if not, write to the Free Software Foundation,
+ * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+#ifndef __ANALOGY_PROC_H__
+#define __ANALOGY_PROC_H__
+
+#ifdef __KERNEL__
+
+#ifdef CONFIG_PROC_FS
+extern struct proc_dir_entry *a4l_proc_root;
+#endif /* CONFIG_PROC_FS */
+
+#endif /* __KERNEL__ */
+
+#endif /* __ANALOGY_PROC_H__ */
+++ linux-patched/drivers/xenomai/analogy/command.c	2022-03-21 12:58:31.035872910 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/drivers/xenomai/analogy/intel/parport.c	1970-01-01 01:00:00.000000000 +0100
+/*
+ * Analogy for Linux, command related features
+ *
+ * Copyright (C) 1997-2000 David A. Schleef <ds@schleef.org>
+ * Copyright (C) 2008 Alexis Berlemont <alexis.berlemont@free.fr>
+ *
+ * Xenomai is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * Xenomai is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Xenomai; if not, write to the Free Software Foundation,
+ * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+#include <linux/module.h>
+#include <linux/ioport.h>
+#include <linux/mman.h>
+#include <asm/io.h>
+#include <asm/errno.h>
+#include <rtdm/analogy/device.h>
+
+/* --- Command descriptor management functions --- */
+int a4l_fill_cmddesc(struct a4l_device_context *cxt, struct a4l_cmd_desc *desc,
+		     unsigned int **chan_descs, void *arg)
+{
+	unsigned int *tmpchans = NULL;
+	int ret = 0;
+
+	ret = rtdm_safe_copy_from_user(rtdm_private_to_fd(cxt),
+				       desc, arg, sizeof(struct a4l_cmd_desc));
+	if (ret != 0)
+		goto out_cmddesc;
+
+
+	if (desc->nb_chan == 0) {
+		ret = -EINVAL;
+		goto out_cmddesc;
+	}
+
+	tmpchans = rtdm_malloc(desc->nb_chan * sizeof(unsigned int));
+	if (tmpchans == NULL) {
+		ret = -ENOMEM;
+		goto out_cmddesc;
+	}
+
+	ret = rtdm_safe_copy_from_user(rtdm_private_to_fd(cxt),
+				       tmpchans,
+				       desc->chan_descs,
+				       desc->nb_chan * sizeof(unsigned int));
+	if (ret != 0) {
+		__a4l_err("%s: invalid arguments\n", __func__);
+		goto out_cmddesc;
+	}
+
+	*chan_descs = desc->chan_descs;
+	desc->chan_descs = tmpchans;
+
+	__a4l_dbg(1, core_dbg, "desc dump: \n");
+	__a4l_dbg(1, core_dbg, "\t->idx_subd=%u\n", desc->idx_subd);
+	__a4l_dbg(1, core_dbg, "\t->flags=%lu\n", desc->flags);
+	__a4l_dbg(1, core_dbg, "\t->nb_chan=%u\n", desc->nb_chan);
+	__a4l_dbg(1, core_dbg, "\t->chan_descs=0x%x\n", *desc->chan_descs);
+	__a4l_dbg(1, core_dbg, "\t->data_len=%u\n", desc->data_len);
+	__a4l_dbg(1, core_dbg, "\t->pdata=0x%p\n", desc->data);
+
+	out_cmddesc:
+
+	if (ret != 0) {
+		__a4l_err("a4l_fill_cmddesc: failed (%d)\n", ret);
+		if (tmpchans != NULL)
+			rtdm_free(tmpchans);
+		desc->chan_descs = NULL;
+	}
+
+	return ret;
+}
+
+void a4l_free_cmddesc(struct a4l_cmd_desc * desc)
+{
+	if (desc->chan_descs != NULL)
+		rtdm_free(desc->chan_descs);
+}
+
+int a4l_check_cmddesc(struct a4l_device_context * cxt, struct a4l_cmd_desc * desc)
+{
+	struct a4l_device *dev = a4l_get_dev(cxt);
+	struct a4l_subdevice *subd;
+
+	if (desc->idx_subd >= dev->transfer.nb_subd) {
+		__a4l_err("a4l_check_cmddesc: "
+			  "subdevice index out of range (idx=%u)\n",
+			  desc->idx_subd);
+		return -EINVAL;
+	}
+
+	subd = dev->transfer.subds[desc->idx_subd];
+
+	if ((subd->flags & A4L_SUBD_TYPES) == A4L_SUBD_UNUSED) {
+		__a4l_err("a4l_check_cmddesc: "
+			  "subdevice type incoherent\n");
+		return -EIO;
+	}
+
+	if (!(subd->flags & A4L_SUBD_CMD)) {
+		__a4l_err("a4l_check_cmddesc: operation not supported, "
+			  "synchronous only subdevice\n");
+		return -EIO;
+	}
+
+	if (test_bit(A4L_SUBD_BUSY, &subd->status)) {
+		__a4l_err("a4l_check_cmddesc: subdevice busy\n");
+		return -EBUSY;
+	}
+
+	return a4l_check_chanlist(dev->transfer.subds[desc->idx_subd],
+				  desc->nb_chan, desc->chan_descs);
+}
+
+/* --- Command checking functions --- */
+
+int a4l_check_generic_cmdcnt(struct a4l_cmd_desc * desc)
+{
+	unsigned int tmp1, tmp2;
+
+	/* Makes sure trigger sources are trivially valid */
+	tmp1 =
+	desc->start_src & ~(TRIG_NOW | TRIG_INT | TRIG_EXT | TRIG_FOLLOW);
+	tmp2 = desc->start_src & (TRIG_NOW | TRIG_INT | TRIG_EXT | TRIG_FOLLOW);
+	if (tmp1 != 0 || tmp2 == 0) {
+		__a4l_err("a4l_check_cmddesc: start_src, weird trigger\n");
+		return -EINVAL;
+	}
+
+	tmp1 = desc->scan_begin_src & ~(TRIG_TIMER | TRIG_EXT | TRIG_FOLLOW);
+	tmp2 = desc->scan_begin_src & (TRIG_TIMER | TRIG_EXT | TRIG_FOLLOW);
+	if (tmp1 != 0 || tmp2 == 0) {
+		__a4l_err("a4l_check_cmddesc: scan_begin_src, , weird trigger\n");
+		return -EINVAL;
+	}
+
+	tmp1 = desc->convert_src & ~(TRIG_TIMER | TRIG_EXT | TRIG_NOW);
+	tmp2 = desc->convert_src & (TRIG_TIMER | TRIG_EXT | TRIG_NOW);
+	if (tmp1 != 0 || tmp2 == 0) {
+		__a4l_err("a4l_check_cmddesc: convert_src, weird trigger\n");
+		return -EINVAL;
+	}
+
+	tmp1 = desc->scan_end_src & ~(TRIG_COUNT);
+	if (tmp1 != 0) {
+		__a4l_err("a4l_check_cmddesc: scan_end_src, weird trigger\n");
+		return -EINVAL;
+	}
+
+	tmp1 = desc->stop_src & ~(TRIG_COUNT | TRIG_NONE);
+	tmp2 = desc->stop_src & (TRIG_COUNT | TRIG_NONE);
+	if (tmp1 != 0 || tmp2 == 0) {
+		__a4l_err("a4l_check_cmddesc: stop_src, weird trigger\n");
+		return -EINVAL;
+	}
+
+	/* Makes sure trigger sources are unique */
+	if (desc->start_src != TRIG_NOW &&
+	    desc->start_src != TRIG_INT &&
+	    desc->start_src != TRIG_EXT && desc->start_src != TRIG_FOLLOW) {
+		__a4l_err("a4l_check_cmddesc: start_src, "
+			  "only one trigger should be set\n");
+		return -EINVAL;
+	}
+
+	if (desc->scan_begin_src != TRIG_TIMER &&
+	    desc->scan_begin_src != TRIG_EXT &&
+	    desc->scan_begin_src != TRIG_FOLLOW) {
+		__a4l_err("a4l_check_cmddesc: scan_begin_src, "
+			  "only one trigger should be set\n");
+		return -EINVAL;
+	}
+
+	if (desc->convert_src != TRIG_TIMER &&
+	    desc->convert_src != TRIG_EXT && desc->convert_src != TRIG_NOW) {
+		__a4l_err("a4l_check_cmddesc: convert_src, "
+			  "only one trigger should be set\n");
+		return -EINVAL;
+	}
+
+	if (desc->stop_src != TRIG_COUNT && desc->stop_src != TRIG_NONE) {
+		__a4l_err("a4l_check_cmddesc: stop_src, "
+			  "only one trigger should be set\n");
+		return -EINVAL;
+	}
+
+	/* Makes sure arguments are trivially compatible */
+	tmp1 = desc->start_src & (TRIG_NOW | TRIG_FOLLOW | TRIG_INT);
+	tmp2 = desc->start_arg;
+	if (tmp1 != 0 && tmp2 != 0) {
+		__a4l_err("a4l_check_cmddesc: no start_arg expected\n");
+		return -EINVAL;
+	}
+
+	tmp1 = desc->scan_begin_src & TRIG_FOLLOW;
+	tmp2 = desc->scan_begin_arg;
+	if (tmp1 != 0 && tmp2 != 0) {
+		__a4l_err("a4l_check_cmddesc: no scan_begin_arg expected\n");
+		return -EINVAL;
+	}
+
+	tmp1 = desc->convert_src & TRIG_NOW;
+	tmp2 = desc->convert_arg;
+	if (tmp1 != 0 && tmp2 != 0) {
+		__a4l_err("a4l_check_cmddesc: no convert_arg expected\n");
+		return -EINVAL;
+	}
+
+	tmp1 = desc->stop_src & TRIG_NONE;
+	tmp2 = desc->stop_arg;
+	if (tmp1 != 0 && tmp2 != 0) {
+		__a4l_err("a4l_check_cmddesc: no stop_arg expected\n");
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+int a4l_check_specific_cmdcnt(struct a4l_device_context * cxt, struct a4l_cmd_desc * desc)
+{
+	unsigned int tmp1, tmp2;
+	struct a4l_device *dev = a4l_get_dev(cxt);
+	struct a4l_cmd_desc *cmd_mask = dev->transfer.subds[desc->idx_subd]->cmd_mask;
+
+	if (cmd_mask == NULL)
+		return 0;
+
+	if (cmd_mask->start_src != 0) {
+		tmp1 = desc->start_src & ~(cmd_mask->start_src);
+		tmp2 = desc->start_src & (cmd_mask->start_src);
+		if (tmp1 != 0 || tmp2 == 0) {
+			__a4l_err("a4l_check_cmddesc: start_src, "
+				  "trigger unsupported\n");
+			return -EINVAL;
+		}
+	}
+
+	if (cmd_mask->scan_begin_src != 0) {
+		tmp1 = desc->scan_begin_src & ~(cmd_mask->scan_begin_src);
+		tmp2 = desc->scan_begin_src & (cmd_mask->scan_begin_src);
+		if (tmp1 != 0 || tmp2 == 0) {
+			__a4l_err("a4l_check_cmddesc: scan_begin_src, "
+				  "trigger unsupported\n");
+			return -EINVAL;
+		}
+	}
+
+	if (cmd_mask->convert_src != 0) {
+		tmp1 = desc->convert_src & ~(cmd_mask->convert_src);
+		tmp2 = desc->convert_src & (cmd_mask->convert_src);
+		if (tmp1 != 0 || tmp2 == 0) {
+			__a4l_err("a4l_check_cmddesc: convert_src, "
+				  "trigger unsupported\n");
+			return -EINVAL;
+		}
+	}
+
+	if (cmd_mask->scan_end_src != 0) {
+		tmp1 = desc->scan_end_src & ~(cmd_mask->scan_end_src);
+		if (tmp1 != 0) {
+			__a4l_err("a4l_check_cmddesc: scan_end_src, "
+				  "trigger unsupported\n");
+			return -EINVAL;
+		}
+	}
+
+	if (cmd_mask->stop_src != 0) {
+		tmp1 = desc->stop_src & ~(cmd_mask->stop_src);
+		tmp2 = desc->stop_src & (cmd_mask->stop_src);
+		if (tmp1 != 0 || tmp2 == 0) {
+			__a4l_err("a4l_check_cmddesc: stop_src, "
+				  "trigger unsupported\n");
+			return -EINVAL;
+		}
+	}
+
+	return 0;
+}
+
+/* --- IOCTL / FOPS function --- */
+
+int a4l_ioctl_cmd(struct a4l_device_context * ctx, void *arg)
+{
+	int ret = 0, simul_flag = 0;
+	struct a4l_cmd_desc *cmd_desc = NULL;
+	struct a4l_device *dev = a4l_get_dev(ctx);
+	unsigned int *chan_descs, *tmp;
+	struct a4l_subdevice *subd;
+
+	/* Launching a command cannot be done from real-time context
+	   because drivers may need to allocate buffers */
+	if (rtdm_in_rt_context())
+		return -ENOSYS;
+
+	/* Basic device check */
+	if (!test_bit(A4L_DEV_ATTACHED_NR, &dev->flags)) {
+		__a4l_err("a4l_ioctl_cmd: cannot command "
+			  "an unattached device\n");
+		return -EINVAL;
+	}
+
+	/* Allocates the command */
+	cmd_desc = (struct a4l_cmd_desc *) rtdm_malloc(sizeof(struct a4l_cmd_desc));
+	if (cmd_desc == NULL)
+		return -ENOMEM;
+	memset(cmd_desc, 0, sizeof(struct a4l_cmd_desc));
+
+	/* Gets the command */
+	ret = a4l_fill_cmddesc(ctx, cmd_desc, &chan_descs, arg);
+	if (ret != 0)
+		goto out_ioctl_cmd;
+
+	/* Checks the command */
+	ret = a4l_check_cmddesc(ctx, cmd_desc);
+	if (ret != 0)
+		goto out_ioctl_cmd;
+
+	ret = a4l_check_generic_cmdcnt(cmd_desc);
+	if (ret != 0)
+		goto out_ioctl_cmd;
+
+	ret = a4l_check_specific_cmdcnt(ctx, cmd_desc);
+	if (ret != 0)
+		goto out_ioctl_cmd;
+
+	__a4l_dbg(1, core_dbg,"1st cmd checks passed\n");
+	subd = dev->transfer.subds[cmd_desc->idx_subd];
+
+	/* Tests the command with the cmdtest function */
+	if (cmd_desc->flags & A4L_CMD_SIMUL) {
+		simul_flag = 1;
+
+		if (!subd->do_cmdtest) {
+			__a4l_err("a4l_ioctl_cmd: driver's cmd_test NULL\n");
+			ret = -EINVAL;
+			goto out_ioctl_cmd;
+		}
+
+		ret = subd->do_cmdtest(subd, cmd_desc);
+		if (ret != 0) {
+			__a4l_err("a4l_ioctl_cmd: driver's cmd_test failed\n");
+			goto out_ioctl_cmd;
+		}
+		__a4l_dbg(1, core_dbg, "driver's cmd checks passed\n");
+		goto out_ioctl_cmd;
+	}
+
+
+	/* Gets the transfer system ready */
+	ret = a4l_setup_buffer(ctx, cmd_desc);
+	if (ret < 0)
+		goto out_ioctl_cmd;
+
+	/* Finally, launches the command */
+	ret = subd->do_cmd(subd, cmd_desc);
+
+	if (ret != 0) {
+		a4l_cancel_buffer(ctx);
+		goto out_ioctl_cmd;
+	}
+
+	out_ioctl_cmd:
+
+	if (simul_flag) {
+		/* copy the kernel based descriptor */
+		tmp = cmd_desc->chan_descs;
+		/* return the user based descriptor */
+		cmd_desc->chan_descs = chan_descs;
+		rtdm_safe_copy_to_user(rtdm_private_to_fd(ctx), arg, cmd_desc,
+				       sizeof(struct a4l_cmd_desc));
+		/* make sure we release the memory associated with the kernel descriptor */
+		cmd_desc->chan_descs = tmp;
+
+	}
+
+	if (ret != 0 || simul_flag == 1) {
+		a4l_free_cmddesc(cmd_desc);
+		rtdm_free(cmd_desc);
+	}
+
+	return ret;
+}
+++ linux-patched/drivers/xenomai/analogy/intel/parport.c	2022-03-21 12:58:31.027872989 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/drivers/xenomai/analogy/intel/Kconfig	1970-01-01 01:00:00.000000000 +0100
+/*
+ * Analogy driver for standard parallel port
+ * Copyright (C) 1998,2001 David A. Schleef <ds@schleef.org>
+ *
+ * This code is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published
+ * by the Free Software Foundation; either version 2 of the License,
+ * or (at your option) any later version.
+ *
+ * This code is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Xenomai; if not, write to the Free Software Foundation,
+ * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+/*
+   A cheap and easy way to get a few more digital I/O lines.  Steal
+   additional parallel ports from old computers or your neighbors'
+   computers.
+
+   Attach options list:
+   0: I/O port base for the parallel port.
+   1: IRQ
+
+   Parallel Port Lines:
+
+   pin     subdev  chan    aka
+   ---     ------  ----    ---
+   1       2       0       strobe
+   2       0       0       data 0
+   3       0       1       data 1
+   4       0       2       data 2
+   5       0       3       data 3
+   6       0       4       data 4
+   7       0       5       data 5
+   8       0       6       data 6
+   9       0       7       data 7
+   10      1       3       acknowledge
+   11      1       4       busy
+   12      1       2       output
+   13      1       1       printer selected
+   14      2       1       auto LF
+   15      1       0       error
+   16      2       2       init
+   17      2       3       select printer
+   18-25   ground
+
+   Notes:
+
+   Subdevice 0 is digital I/O, subdevice 1 is digital input, and
+   subdevice 2 is digital output.  Unlike other Analogy devices,
+   subdevice 0 defaults to output.
+
+   Pins 13 and 14 are inverted once by Analogy and once by the
+   hardware, thus cancelling the effect.
+
+   Pin 1 is a strobe, thus acts like one.  There's no way in software
+   to change this, at least on a standard parallel port.
+
+   Subdevice 3 pretends to be a digital input subdevice, but it always
+   returns 0 when read.  However, if you run a command with
+   scan_begin_src=TRIG_EXT, it uses pin 10 as an external triggering
+   pin, which can be used to wake up tasks.
+
+   See http://www.beyondlogic.org/ or
+   http://www.linux-magazin.de/ausgabe/1999/10/IO/io.html for more information.
+*/
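+
+/*
+   Illustrative sketch (not part of the driver): the attach options
+   are decoded as an array of unsigned longs, the first entry being
+   the I/O base and the optional second entry the IRQ line.  Assuming
+   a link descriptor named lnkdsc (a4l_lnkdesc_t), the defaults above
+   could be selected explicitly with:
+
+     unsigned long opts[2] = { 0x378, 7 };
+     lnkdsc.opts = opts;
+     lnkdsc.opts_size = sizeof(opts);
+*/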
+
+#include <linux/module.h>
+#include <linux/ioport.h>
+#include <linux/io.h>		/* For inb/outb */
+#include <rtdm/analogy/device.h>
+
+#define PARPORT_SIZE 3
+
+#define PARPORT_A 0
+#define PARPORT_B 1
+#define PARPORT_C 2
+
+#define DEFAULT_ADDRESS 0x378
+#define DEFAULT_IRQ 7
+
+typedef struct parport_subd_priv {
+	unsigned long io_bits;
+} parport_spriv_t;
+
+typedef struct parport_priv {
+	unsigned long io_base;
+	unsigned int a_data;
+	unsigned int c_data;
+	int enable_irq;
+} parport_priv_t;
+
+#define devpriv ((parport_priv_t *)(dev->priv))
+
+static int parport_insn_a(struct a4l_subdevice *subd, struct a4l_kernel_instruction *insn)
+{
+	struct a4l_device *dev = subd->dev;
+	uint8_t *data = (uint8_t *)insn->data;
+
+	if (data[0]) {
+		devpriv->a_data &= ~data[0];
+		devpriv->a_data |= (data[0] & data[1]);
+
+		outb(devpriv->a_data, devpriv->io_base + PARPORT_A);
+	}
+
+	data[1] = inb(devpriv->io_base + PARPORT_A);
+
+	return 0;
+}
+
+static int parport_insn_config_a(struct a4l_subdevice *subd, struct a4l_kernel_instruction *insn)
+{
+	struct a4l_device *dev = subd->dev;
+	parport_spriv_t *spriv = (parport_spriv_t *)subd->priv;
+	unsigned int *data = (unsigned int *)insn->data;
+
+	/* No need to check the channel descriptor; the input / output
+	   setting is global for all channels */
+
+	switch (data[0]) {
+
+	case A4L_INSN_CONFIG_DIO_OUTPUT:
+		spriv->io_bits = 0xff;
+		devpriv->c_data &= ~(1 << 5);
+		break;
+
+	case A4L_INSN_CONFIG_DIO_INPUT:
+		spriv->io_bits = 0;
+		devpriv->c_data |= (1 << 5);
+		break;
+
+	case A4L_INSN_CONFIG_DIO_QUERY:
+		data[1] = (spriv->io_bits == 0xff) ?
+			A4L_OUTPUT: A4L_INPUT;
+		break;
+
+	default:
+		return -EINVAL;
+	}
+
+	outb(devpriv->c_data, devpriv->io_base + PARPORT_C);
+
+	return 0;
+}
+
+static int parport_insn_b(struct a4l_subdevice *subd, struct a4l_kernel_instruction *insn)
+{
+	struct a4l_device *dev = subd->dev;
+	uint8_t *data = (uint8_t *)insn->data;
+
+	if (data[0]) {
+		/* should writes be ignored? */
+	}
+
+	data[1] = (inb(devpriv->io_base + PARPORT_B) >> 3);
+
+	return 0;
+}
+
+static int parport_insn_c(struct a4l_subdevice *subd, struct a4l_kernel_instruction *insn)
+{
+	struct a4l_device *dev = subd->dev;
+	uint8_t *data = (uint8_t *)insn->data;
+
+	data[0] &= 0x0f;
+	if (data[0]) {
+		devpriv->c_data &= ~data[0];
+		devpriv->c_data |= (data[0] & data[1]);
+
+		outb(devpriv->c_data, devpriv->io_base + PARPORT_C);
+	}
+
+	data[1] = devpriv->c_data & 0xf;
+
+	return 2;
+}
+
+static int parport_intr_insn(struct a4l_subdevice *subd, struct a4l_kernel_instruction *insn)
+{
+	uint8_t *data = (uint8_t *)insn->data;
+
+	if (insn->data_size < sizeof(uint8_t))
+		return -EINVAL;
+
+	data[1] = 0;
+	return 0;
+}
+
+static struct a4l_cmd_desc parport_intr_cmd_mask = {
+	.idx_subd = 0,
+	.start_src = TRIG_NOW,
+	.scan_begin_src = TRIG_EXT,
+	.convert_src = TRIG_FOLLOW,
+	.scan_end_src = TRIG_COUNT,
+	.stop_src = TRIG_NONE,
+};
+
+static int parport_intr_cmdtest(struct a4l_subdevice *subd, struct a4l_cmd_desc * cmd)
+{
+
+	if (cmd->start_arg != 0) {
+		return -EINVAL;
+	}
+	if (cmd->scan_begin_arg != 0) {
+		return -EINVAL;
+	}
+	if (cmd->convert_arg != 0) {
+		return -EINVAL;
+	}
+	if (cmd->scan_end_arg != 1) {
+		return -EINVAL;
+	}
+	if (cmd->stop_arg != 0) {
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int parport_intr_cmd(struct a4l_subdevice *subd, struct a4l_cmd_desc *cmd)
+{
+	struct a4l_device *dev = subd->dev;
+
+	devpriv->c_data |= 0x10;
+	outb(devpriv->c_data, devpriv->io_base + PARPORT_C);
+
+	devpriv->enable_irq = 1;
+
+	return 0;
+}
+
+static void parport_intr_cancel(struct a4l_subdevice *subd)
+{
+	struct a4l_device *dev = subd->dev;
+
+	a4l_info(dev, "cancel in progress\n");
+
+	devpriv->c_data &= ~0x10;
+	outb(devpriv->c_data, devpriv->io_base + PARPORT_C);
+
+	devpriv->enable_irq = 0;
+}
+
+static int parport_interrupt(unsigned int irq, void *d)
+{
+	struct a4l_device *dev = d;
+	struct a4l_subdevice *subd = a4l_get_subd(dev, 3);
+
+	if (!devpriv->enable_irq) {
+		a4l_err(dev, "parport_interrupt: bogus irq, ignored\n");
+		return IRQ_NONE;
+	}
+
+	a4l_buf_put(subd, 0, sizeof(unsigned int));
+	a4l_buf_evt(subd, 0);
+
+	return 0;
+}
+
+
+/* --- Channels descriptor --- */
+
+static struct a4l_channels_desc parport_chan_desc_a = {
+	.mode = A4L_CHAN_GLOBAL_CHANDESC,
+	.length = 8,
+	.chans = {
+		{A4L_CHAN_AREF_GROUND, 1},
+	},
+};
+
+static struct a4l_channels_desc parport_chan_desc_b = {
+	.mode = A4L_CHAN_GLOBAL_CHANDESC,
+	.length = 5,
+	.chans = {
+		{A4L_CHAN_AREF_GROUND, 1},
+	},
+};
+
+static struct a4l_channels_desc parport_chan_desc_c = {
+	.mode = A4L_CHAN_GLOBAL_CHANDESC,
+	.length = 4,
+	.chans = {
+		{A4L_CHAN_AREF_GROUND, 1},
+	},
+};
+
+static struct a4l_channels_desc parport_chan_desc_intr = {
+	.mode = A4L_CHAN_GLOBAL_CHANDESC,
+	.length = 1,
+	.chans = {
+		{A4L_CHAN_AREF_GROUND, 1},
+	},
+};
+
+/* --- Subdevice initialization functions --- */
+
+static void setup_subd_a(struct a4l_subdevice *subd)
+{
+	subd->flags = A4L_SUBD_DIO;
+	subd->chan_desc = &parport_chan_desc_a;
+	subd->rng_desc = &range_digital;
+	subd->insn_bits = parport_insn_a;
+	subd->insn_config = parport_insn_config_a;
+}
+
+static void setup_subd_b(struct a4l_subdevice *subd)
+{
+	subd->flags = A4L_SUBD_DI;
+	subd->chan_desc = &parport_chan_desc_b;
+	subd->rng_desc = &range_digital;
+	subd->insn_bits = parport_insn_b;
+}
+
+static void setup_subd_c(struct a4l_subdevice *subd)
+{
+	subd->flags = A4L_SUBD_DO;
+	subd->chan_desc = &parport_chan_desc_c;
+	subd->rng_desc = &range_digital;
+	subd->insn_bits = parport_insn_c;
+}
+
+static void setup_subd_intr(struct a4l_subdevice *subd)
+{
+	subd->flags = A4L_SUBD_DI;
+	subd->chan_desc = &parport_chan_desc_intr;
+	subd->rng_desc = &range_digital;
+	subd->insn_bits = parport_intr_insn;
+	subd->cmd_mask = &parport_intr_cmd_mask;
+	subd->do_cmdtest = parport_intr_cmdtest;
+	subd->do_cmd = parport_intr_cmd;
+	subd->cancel = parport_intr_cancel;
+}
+
+static void (*setup_subds[3])(struct a4l_subdevice *) = {
+	setup_subd_a,
+	setup_subd_b,
+	setup_subd_c
+};
+
+static int dev_parport_attach(struct a4l_device *dev, a4l_lnkdesc_t *arg)
+{
+	int i, err = 0, irq = A4L_IRQ_UNUSED;
+	unsigned long io_base;
+
+	if(arg->opts == NULL || arg->opts_size < sizeof(unsigned long)) {
+
+		a4l_warn(dev,
+			 "dev_parport_attach: no attach options specified, "
+			 "taking default options (addr=0x%x, irq=%d)\n",
+			 DEFAULT_ADDRESS, DEFAULT_IRQ);
+
+		io_base = DEFAULT_ADDRESS;
+		irq = DEFAULT_IRQ;
+	} else {
+
+		io_base = ((unsigned long *)arg->opts)[0];
+
+		if (arg->opts_size >= 2 * sizeof(unsigned long))
+			irq = (int) ((unsigned long *)arg->opts)[1];
+	}
+
+	if (!request_region(io_base, PARPORT_SIZE, "analogy_parport")) {
+		a4l_err(dev, "dev_parport_attach: I/O port conflict");
+		return -EIO;
+	}
+
+	a4l_info(dev, "address = 0x%lx\n", io_base);
+
+	for (i = 0; i < 3; i++) {
+
+		struct a4l_subdevice *subd = a4l_alloc_subd(sizeof(parport_spriv_t),
+						  setup_subds[i]);
+		if (subd == NULL)
+			return -ENOMEM;
+
+		err = a4l_add_subd(dev, subd);
+		if (err != i)
+			return err;
+	}
+
+	if (irq != A4L_IRQ_UNUSED) {
+
+		struct a4l_subdevice *subd;
+
+		a4l_info(dev, "irq = %d\n", irq);
+
+		err = a4l_request_irq(dev, irq, parport_interrupt, 0, dev);
+		if (err < 0) {
+			a4l_err(dev, "dev_parport_attach: irq not available\n");
+			return err;
+		}
+
+		subd = a4l_alloc_subd(0, setup_subd_intr);
+		if (subd == NULL)
+			return -ENOMEM;
+
+		err = a4l_add_subd(dev, subd);
+		if (err < 0)
+			return err;
+	}
+
+	devpriv->io_base = io_base;
+
+	devpriv->a_data = 0;
+	outb(devpriv->a_data, devpriv->io_base + PARPORT_A);
+
+	devpriv->c_data = 0;
+	outb(devpriv->c_data, devpriv->io_base + PARPORT_C);
+
+	return 0;
+}
+
+static int dev_parport_detach(struct a4l_device *dev)
+{
+	int err = 0;
+
+	if (devpriv->io_base != 0)
+		release_region(devpriv->io_base, PARPORT_SIZE);
+
+	if (a4l_get_irq(dev) != A4L_IRQ_UNUSED) {
+		a4l_free_irq(dev, a4l_get_irq(dev));
+	}
+
+
+	return err;
+}
+
+static struct a4l_driver drv_parport = {
+	.owner = THIS_MODULE,
+	.board_name = "analogy_parport",
+	.driver_name = "parport",
+	.attach = dev_parport_attach,
+	.detach = dev_parport_detach,
+	.privdata_size = sizeof(parport_priv_t),
+};
+
+static int __init drv_parport_init(void)
+{
+	return a4l_register_drv(&drv_parport);
+}
+
+static void __exit drv_parport_cleanup(void)
+{
+	a4l_unregister_drv(&drv_parport);
+}
+
+MODULE_DESCRIPTION("Analogy driver for standard parallel port");
+MODULE_LICENSE("GPL");
+
+module_init(drv_parport_init);
+module_exit(drv_parport_cleanup);
+++ linux-patched/drivers/xenomai/analogy/intel/Kconfig	2022-03-21 12:58:31.020873057 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/drivers/xenomai/analogy/intel/8255.c	1970-01-01 01:00:00.000000000 +0100
+
+config XENO_DRIVERS_ANALOGY_8255
+	depends on XENO_DRIVERS_ANALOGY
+	tristate "8255 driver"
+	default n
+
+config XENO_DRIVERS_ANALOGY_PARPORT
+	depends on XENO_DRIVERS_ANALOGY && X86
+	tristate "Standard parallel port driver"
+	default n
+++ linux-patched/drivers/xenomai/analogy/intel/8255.c	2022-03-21 12:58:31.012873135 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/drivers/xenomai/analogy/intel/8255.h	1970-01-01 01:00:00.000000000 +0100
+/*
+ * Analogy subdevice driver for 8255 chip
+ * Copyright (C) 1999 David A. Schleef <ds@schleef.org>
+ *
+ * This code is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published
+ * by the Free Software Foundation; either version 2 of the License,
+ * or (at your option) any later version.
+ *
+ * This code is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Xenomai; if not, write to the Free Software Foundation,
+ * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+#include <linux/module.h>
+#include <linux/ioport.h>
+#include <linux/io.h>
+#include <rtdm/analogy/device.h>
+
+#include "8255.h"
+
+#define CALLBACK_ARG		(((subd_8255_t *)subd->priv)->cb_arg)
+#define CALLBACK_FUNC		(((subd_8255_t *)subd->priv)->cb_func)
+
+/* Channels descriptor */
+static struct a4l_channels_desc chandesc_8255 = {
+	.mode = A4L_CHAN_GLOBAL_CHANDESC,
+	.length = 24,
+	.chans = {
+		{A4L_CHAN_AREF_GROUND, sizeof(sampl_t)},
+	},
+};
+
+/* Command options mask */
+static struct a4l_cmd_desc cmd_mask_8255 = {
+	.idx_subd = 0,
+	.start_src = TRIG_NOW,
+	.scan_begin_src = TRIG_EXT,
+	.convert_src = TRIG_FOLLOW,
+	.scan_end_src = TRIG_COUNT,
+	.stop_src = TRIG_NONE,
+};
+
+void a4l_subdev_8255_interrupt(struct a4l_subdevice *subd)
+{
+	sampl_t d;
+
+	/* Retrieve the sample... */
+	d = CALLBACK_FUNC(0, _8255_DATA, 0, CALLBACK_ARG);
+	d |= (CALLBACK_FUNC(0, _8255_DATA + 1, 0, CALLBACK_ARG) << 8);
+
+	/* ...and send it */
+	a4l_buf_put(subd, &d, sizeof(sampl_t));
+
+	a4l_buf_evt(subd, 0);
+}
+EXPORT_SYMBOL_GPL(a4l_subdev_8255_interrupt);
+
+static int subdev_8255_cb(int dir, int port, int data, unsigned long arg)
+{
+	unsigned long iobase = arg;
+
+	if (dir) {
+		outb(data, iobase + port);
+		return 0;
+	} else {
+		return inb(iobase + port);
+	}
+}
+
+static void do_config(struct a4l_subdevice *subd)
+{
+	int config;
+	subd_8255_t *subd_8255 = (subd_8255_t *)subd->priv;
+
+	config = CR_CW;
+	/* 1 in io_bits indicates output, 1 in config indicates input */
+	if (!(subd_8255->io_bits & 0x0000ff))
+		config |= CR_A_IO;
+	if (!(subd_8255->io_bits & 0x00ff00))
+		config |= CR_B_IO;
+	if (!(subd_8255->io_bits & 0x0f0000))
+		config |= CR_C_LO_IO;
+	if (!(subd_8255->io_bits & 0xf00000))
+		config |= CR_C_HI_IO;
+	CALLBACK_FUNC(1, _8255_CR, config, CALLBACK_ARG);
+}
+
+int subd_8255_cmd(struct a4l_subdevice *subd, struct a4l_cmd_desc *cmd)
+{
+	/* FIXME */
+	return 0;
+}
+
+int subd_8255_cmdtest(struct a4l_subdevice *subd, struct a4l_cmd_desc *cmd)
+{
+	if (cmd->start_arg != 0) {
+		cmd->start_arg = 0;
+		return -EINVAL;
+	}
+	if (cmd->scan_begin_arg != 0) {
+		cmd->scan_begin_arg = 0;
+		return -EINVAL;
+	}
+	if (cmd->convert_arg != 0) {
+		cmd->convert_arg = 0;
+		return -EINVAL;
+	}
+	if (cmd->scan_end_arg != 1) {
+		cmd->scan_end_arg = 1;
+		return -EINVAL;
+	}
+	if (cmd->stop_arg != 0) {
+		cmd->stop_arg = 0;
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+void subd_8255_cancel(struct a4l_subdevice *subd)
+{
+	/* FIXME */
+}
+
+int subd_8255_insn_bits(struct a4l_subdevice *subd, struct a4l_kernel_instruction *insn)
+{
+	subd_8255_t *subd_8255 = (subd_8255_t *)subd->priv;
+	uint32_t *data = (uint32_t *)insn->data;
+
+	if (data[0]) {
+
+		subd_8255->status &= ~data[0];
+		subd_8255->status |= (data[0] & data[1]);
+
+		if (data[0] & 0xff)
+			CALLBACK_FUNC(1, _8255_DATA,
+				      subd_8255->status & 0xff, CALLBACK_ARG);
+		if (data[0] & 0xff00)
+			CALLBACK_FUNC(1, _8255_DATA + 1,
+				      (subd_8255->status >> 8) & 0xff,
+				      CALLBACK_ARG);
+		if (data[0] & 0xff0000)
+			CALLBACK_FUNC(1, _8255_DATA + 2,
+				      (subd_8255->status >> 16) & 0xff,
+				      CALLBACK_ARG);
+	}
+
+	data[1] = CALLBACK_FUNC(0, _8255_DATA, 0, CALLBACK_ARG);
+	data[1] |= (CALLBACK_FUNC(0, _8255_DATA + 1, 0, CALLBACK_ARG) << 8);
+	data[1] |= (CALLBACK_FUNC(0, _8255_DATA + 2, 0, CALLBACK_ARG) << 16);
+
+	return 0;
+}
+
+int subd_8255_insn_config(struct a4l_subdevice *subd, struct a4l_kernel_instruction *insn)
+{
+	unsigned int mask;
+	unsigned int bits;
+	subd_8255_t *subd_8255 = (subd_8255_t *)subd->priv;
+	unsigned int *data = (unsigned int *)insn->data;
+
+	mask = 1 << CR_CHAN(insn->chan_desc);
+
+	if (mask & 0x0000ff) {
+		bits = 0x0000ff;
+	} else if (mask & 0x00ff00) {
+		bits = 0x00ff00;
+	} else if (mask & 0x0f0000) {
+		bits = 0x0f0000;
+	} else {
+		bits = 0xf00000;
+	}
+
+	switch (data[0]) {
+	case A4L_INSN_CONFIG_DIO_INPUT:
+		subd_8255->io_bits &= ~bits;
+		break;
+	case A4L_INSN_CONFIG_DIO_OUTPUT:
+		subd_8255->io_bits |= bits;
+		break;
+	case A4L_INSN_CONFIG_DIO_QUERY:
+		data[1] = (subd_8255->io_bits & bits) ?
+			A4L_OUTPUT : A4L_INPUT;
+		return 0;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	do_config(subd);
+
+	return 0;
+}
+
+void a4l_subdev_8255_init(struct a4l_subdevice *subd)
+{
+	subd_8255_t *subd_8255 = (subd_8255_t *)subd->priv;
+	/* Initializes the subdevice structure */
+	memset(subd, 0, sizeof(struct a4l_subdevice));
+
+	/* Subdevice filling part */
+
+	subd->flags = A4L_SUBD_DIO;
+	subd->flags |= A4L_SUBD_CMD;
+	subd->chan_desc = &chandesc_8255;
+	subd->insn_bits = subd_8255_insn_bits;
+	subd->insn_config = subd_8255_insn_config;
+
+	if(subd_8255->have_irq) {
+		subd->cmd_mask = &cmd_mask_8255;
+		subd->do_cmdtest = subd_8255_cmdtest;
+		subd->do_cmd = subd_8255_cmd;
+		subd->cancel = subd_8255_cancel;
+	}
+
+	/* 8255 setting part */
+
+	if(CALLBACK_FUNC == NULL)
+		CALLBACK_FUNC = subdev_8255_cb;
+
+	do_config(subd);
+}
+EXPORT_SYMBOL_GPL(a4l_subdev_8255_init);
+
+/*
+
+  Start of the 8255 standalone device
+
+*/
+
+static int dev_8255_attach(struct a4l_device *dev, a4l_lnkdesc_t *arg)
+{
+	unsigned long *addrs;
+	int i, err = 0;
+
+	if(arg->opts == NULL || arg->opts_size == 0) {
+		a4l_err(dev,
+			"dev_8255_attach: unable to detect any 8255 chip, "
+			"chips addresses must be passed as attach arguments\n");
+		return -EINVAL;
+	}
+
+	addrs = (unsigned long*) arg->opts;
+
+	for(i = 0; i < (arg->opts_size / sizeof(unsigned long)); i++) {
+		struct a4l_subdevice * subd;
+		subd_8255_t *subd_8255;
+
+		subd = a4l_alloc_subd(sizeof(subd_8255_t), NULL);
+		if(subd == NULL) {
+			a4l_err(dev,
+				"dev_8255_attach: "
+				"unable to allocate subdevice\n");
+			/* There is no need to free previously
+			   allocated structure(s), the analogy layer will
+			   do it for us */
+			err = -ENOMEM;
+			goto out_attach;
+		}
+
+		memset(subd, 0, sizeof(struct a4l_subdevice));
+		memset(subd->priv, 0, sizeof(subd_8255_t));
+
+		subd_8255 = (subd_8255_t *)subd->priv;
+
+		if(request_region(addrs[i], _8255_SIZE, "Analogy 8255") == 0) {
+			subd->flags = A4L_SUBD_UNUSED;
+			a4l_warn(dev,
+				 "dev_8255_attach: "
+				 "I/O port conflict at 0x%lx\n", addrs[i]);
+		}
+		else {
+			subd_8255->cb_arg = addrs[i];
+			a4l_subdev_8255_init(subd);
+		}
+
+		err = a4l_add_subd(dev, subd);
+		if(err < 0) {
+			a4l_err(dev,
+				"dev_8255_attach: "
+				"a4l_add_subd() failed (err=%d)\n", err);
+			goto out_attach;
+		}
+	}
+
+out_attach:
+	return err;
+}
+
+static int dev_8255_detach(struct a4l_device *dev)
+{
+	struct a4l_subdevice *subd;
+	int i = 0;
+
+	while((subd = a4l_get_subd(dev, i++)) != NULL) {
+		subd_8255_t *subd_8255 = (subd_8255_t *) subd->priv;
+		if(subd_8255 != NULL && subd_8255->cb_arg != 0)
+			release_region(subd_8255->cb_arg, _8255_SIZE);
+	}
+
+	return 0;
+}
+
+static struct a4l_driver drv_8255 = {
+	.owner = THIS_MODULE,
+	.board_name = "analogy_8255",
+	.driver_name = "8255",
+	.attach = dev_8255_attach,
+	.detach = dev_8255_detach,
+	.privdata_size = 0,
+};
+
+static int __init drv_8255_init(void)
+{
+	return a4l_register_drv(&drv_8255);
+}
+
+static void __exit drv_8255_cleanup(void)
+{
+	a4l_unregister_drv(&drv_8255);
+}
+MODULE_DESCRIPTION("Analogy driver for 8255 chip");
+MODULE_LICENSE("GPL");
+
+module_init(drv_8255_init);
+module_exit(drv_8255_cleanup);
+++ linux-patched/drivers/xenomai/analogy/intel/8255.h	2022-03-21 12:58:31.005873203 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/drivers/xenomai/analogy/intel/Makefile	1970-01-01 01:00:00.000000000 +0100
+/*
+ * Hardware driver for 8255 chip
+ * @note Copyright (C) 1999 David A. Schleef <ds@schleef.org>
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA.
+ */
+#ifndef __ANALOGY_8255_H__
+#define __ANALOGY_8255_H__
+
+#include <rtdm/analogy/device.h>
+
+typedef int (*a4l_8255_cb_t)(int, int, int, unsigned long);
+
+typedef struct subd_8255_struct {
+	unsigned long cb_arg;
+	a4l_8255_cb_t cb_func;
+	unsigned int status;
+	int have_irq;
+	int io_bits;
+} subd_8255_t;
+
+#if (defined(CONFIG_XENO_DRIVERS_ANALOGY_8255) || \
+     defined(CONFIG_XENO_DRIVERS_ANALOGY_8255_MODULE))
+
+#define _8255_SIZE 4
+
+#define _8255_DATA 0
+#define _8255_CR 3
+
+#define CR_C_LO_IO	0x01
+#define CR_B_IO		0x02
+#define CR_B_MODE	0x04
+#define CR_C_HI_IO	0x08
+#define CR_A_IO		0x10
+#define CR_A_MODE(a)	((a)<<5)
+#define CR_CW		0x80
+
+void a4l_subdev_8255_init(struct a4l_subdevice *subd);
+void a4l_subdev_8255_interrupt(struct a4l_subdevice *subd);
+
+#else /* !CONFIG_XENO_DRIVERS_ANALOGY_8255 */
+
+#define a4l_subdev_8255_init(x)		do { } while(0)
+#define a4l_subdev_8255_interrupt(x)	do { } while(0)
+
+#endif /* CONFIG_XENO_DRIVERS_ANALOGY_8255 */
+
+#endif /* !__ANALOGY_8255_H__ */
+++ linux-patched/drivers/xenomai/analogy/intel/Makefile	2022-03-21 12:58:30.997873281 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/drivers/xenomai/analogy/instruction.c	1970-01-01 01:00:00.000000000 +0100
+
+ccflags-y += -I$(srctree)/drivers/xenomai/analogy
+
+obj-$(CONFIG_XENO_DRIVERS_ANALOGY_8255) += analogy_8255.o
+
+obj-$(CONFIG_XENO_DRIVERS_ANALOGY_PARPORT) += analogy_parport.o
+
+analogy_8255-y := 8255.o
+
+analogy_parport-y := parport.o
+++ linux-patched/drivers/xenomai/analogy/instruction.c	2022-03-21 12:58:30.990873349 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/drivers/xenomai/analogy/driver_facilities.c	1970-01-01 01:00:00.000000000 +0100
+/*
+ * Analogy for Linux, instruction related features
+ *
+ * Copyright (C) 1997-2000 David A. Schleef <ds@schleef.org>
+ * Copyright (C) 2008 Alexis Berlemont <alexis.berlemont@free.fr>
+ *
+ * Xenomai is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * Xenomai is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Xenomai; if not, write to the Free Software Foundation,
+ * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+#include <linux/module.h>
+#include <linux/version.h>
+#include <linux/ioport.h>
+#include <linux/mman.h>
+#include <asm/div64.h>
+#include <asm/io.h>
+#include <asm/errno.h>
+#include <rtdm/analogy/device.h>
+
+int a4l_do_insn_gettime(struct a4l_kernel_instruction * dsc)
+{
+	nanosecs_abs_t ns;
+	uint32_t ns2;
+
+	unsigned int *data = (unsigned int *)dsc->data;
+
+	/* Basic checks */
+	if (dsc->data_size != 2 * sizeof(unsigned int)) {
+		__a4l_err("a4l_do_insn_gettime: data size should be 2\n");
+		return -EINVAL;
+	}
+
+	/* Get a timestamp */
+	ns = a4l_get_time();
+
+	/* Perform the conversion */
+	ns2 = do_div(ns, 1000000000);
+	data[0] = (unsigned int) ns;
+	data[1] = (unsigned int) ns2 / 1000;
+
+	return 0;
+}
+
+int a4l_do_insn_wait(struct a4l_kernel_instruction * dsc)
+{
+	unsigned int us;
+	unsigned int *data = (unsigned int *)dsc->data;
+
+	/* Basic checks */
+	if (dsc->data_size != sizeof(unsigned int)) {
+		__a4l_err("a4l_do_insn_wait: data size should be 1\n");
+		return -EINVAL;
+	}
+
+	if (data[0] > A4L_INSN_WAIT_MAX) {
+		__a4l_err("a4l_do_insn_wait: wait duration is out of range\n");
+		return -EINVAL;
+	}
+
+	/* As we use (a4l_)udelay, we have to convert the delay into
+	   microseconds */
+	us = data[0] / 1000;
+
+	/* The delay is rounded up to at least 1 microsecond */
+	if (us == 0)
+		us = 1;
+
+	/* Performs the busy waiting */
+	a4l_udelay(us);
+
+	return 0;
+}
+
+int a4l_do_insn_trig(struct a4l_device_context * cxt, struct a4l_kernel_instruction * dsc)
+{
+	struct a4l_subdevice *subd;
+	struct a4l_device *dev = a4l_get_dev(cxt);
+	unsigned int trignum;
+	unsigned int *data = (unsigned int*)dsc->data;
+
+	/* Basic checks */
+	if (dsc->data_size > 1) {
+		__a4l_err("a4l_do_insn_trig: data size should not be > 1\n");
+		return -EINVAL;
+	}
+
+	trignum = (dsc->data_size == sizeof(unsigned int)) ? data[0] : 0;
+
+	if (dsc->idx_subd >= dev->transfer.nb_subd) {
+		__a4l_err("a4l_do_insn_trig: "
+			  "subdevice index is out of range\n");
+		return -EINVAL;
+	}
+
+	subd = dev->transfer.subds[dsc->idx_subd];
+
+	/* Checks that the concerned subdevice is trigger-compliant */
+	if ((subd->flags & A4L_SUBD_CMD) == 0 || subd->trigger == NULL) {
+		__a4l_err("a4l_do_insn_trig: subdevice does not support "
+			  "triggering or asynchronous acquisition\n");
+		return -EINVAL;
+	}
+
+	/* Performs the trigger */
+	return subd->trigger(subd, trignum);
+}
+
+int a4l_fill_insndsc(struct a4l_device_context * cxt, struct a4l_kernel_instruction * dsc, void *arg)
+{
+	struct rtdm_fd *fd = rtdm_private_to_fd(cxt);
+	int ret = 0;
+	void *tmp_data = NULL;
+
+	ret = rtdm_safe_copy_from_user(fd,
+				       dsc, arg, sizeof(a4l_insn_t));
+	if (ret != 0)
+		goto out_insndsc;
+
+	if (dsc->data_size != 0 && dsc->data == NULL) {
+		__a4l_err("a4l_fill_insndsc: no data pointer specified\n");
+		ret = -EINVAL;
+		goto out_insndsc;
+	}
+
+	if (dsc->data_size != 0 && dsc->data != NULL) {
+		tmp_data = rtdm_malloc(dsc->data_size);
+		if (tmp_data == NULL) {
+			ret = -ENOMEM;
+			goto out_insndsc;
+		}
+
+		if ((dsc->type & A4L_INSN_MASK_WRITE) != 0) {
+			ret = rtdm_safe_copy_from_user(fd,
+						       tmp_data, dsc->data,
+						       dsc->data_size);
+			if (ret < 0)
+				goto out_insndsc;
+		}
+	}
+
+	dsc->__udata = dsc->data;
+	dsc->data = tmp_data;
+
+out_insndsc:
+
+	if (ret != 0 && tmp_data != NULL)
+		rtdm_free(tmp_data);
+
+	return ret;
+}
+
+int a4l_free_insndsc(struct a4l_device_context * cxt, struct a4l_kernel_instruction * dsc)
+{
+	struct rtdm_fd *fd = rtdm_private_to_fd(cxt);
+	int ret = 0;
+
+	if ((dsc->type & A4L_INSN_MASK_READ) != 0)
+		ret = rtdm_safe_copy_to_user(fd,
+					     dsc->__udata,
+					     dsc->data, dsc->data_size);
+
+	if (dsc->data != NULL)
+		rtdm_free(dsc->data);
+
+	return ret;
+}
+
+int a4l_do_special_insn(struct a4l_device_context * cxt, struct a4l_kernel_instruction * dsc)
+{
+	int ret = 0;
+
+	switch (dsc->type) {
+	case A4L_INSN_GTOD:
+		ret = a4l_do_insn_gettime(dsc);
+		break;
+	case A4L_INSN_WAIT:
+		ret = a4l_do_insn_wait(dsc);
+		break;
+	case A4L_INSN_INTTRIG:
+		ret = a4l_do_insn_trig(cxt, dsc);
+		break;
+	default:
+		__a4l_err("a4l_do_special_insn: "
+			  "incoherent instruction code\n");
+		return -EINVAL;
+	}
+
+	if (ret < 0)
+		__a4l_err("a4l_do_special_insn: "
+			  "execution of the instruction failed (err=%d)\n",
+			  ret);
+
+	return ret;
+}
+
+int a4l_do_insn(struct a4l_device_context * cxt, struct a4l_kernel_instruction * dsc)
+{
+	int ret = 0;
+	struct a4l_subdevice *subd;
+	struct a4l_device *dev = a4l_get_dev(cxt);
+	int (*hdlr) (struct a4l_subdevice *, struct a4l_kernel_instruction *) = NULL;
+
+	/* Checks the subdevice index */
+	if (dsc->idx_subd >= dev->transfer.nb_subd) {
+		__a4l_err("a4l_do_insn: "
+			  "subdevice index out of range (idx=%d)\n",
+			  dsc->idx_subd);
+		return -EINVAL;
+	}
+
+	/* Recovers a pointer to the proper subdevice */
+	subd = dev->transfer.subds[dsc->idx_subd];
+
+	/* Checks the subdevice's characteristics */
+	if ((subd->flags & A4L_SUBD_TYPES) == A4L_SUBD_UNUSED) {
+		__a4l_err("a4l_do_insn: wrong subdevice selected\n");
+		return -EINVAL;
+	}
+
+	/* Checks the channel descriptor */
+	if ((subd->flags & A4L_SUBD_TYPES) != A4L_SUBD_CALIB) {
+		ret = a4l_check_chanlist(dev->transfer.subds[dsc->idx_subd],
+					 1, &dsc->chan_desc);
+		if (ret < 0)
+			return ret;
+	}
+
+	/* Chooses the proper handler; we can check the pointer because
+	   the subdevice was memset to 0 at allocation time */
+	switch (dsc->type) {
+	case A4L_INSN_READ:
+		hdlr = subd->insn_read;
+		break;
+	case A4L_INSN_WRITE:
+		hdlr = subd->insn_write;
+		break;
+	case A4L_INSN_BITS:
+		hdlr = subd->insn_bits;
+		break;
+	case A4L_INSN_CONFIG:
+		hdlr = subd->insn_config;
+		break;
+	default:
+		ret = -EINVAL;
+	}
+
+	/* We check the instruction type */
+	if (ret < 0)
+		return ret;
+
+	/* We check whether a handler is available */
+	if (hdlr == NULL)
+		return -ENOSYS;
+
+	/* Prevents the subdevice from being used during
+	   the following operations */
+	if (test_and_set_bit(A4L_SUBD_BUSY_NR, &subd->status)) {
+		ret = -EBUSY;
+		goto out_do_insn;
+	}
+
+	/* Lets the driver-specific code perform the instruction */
+	ret = hdlr(subd, dsc);
+
+	if (ret < 0)
+		__a4l_err("a4l_do_insn: "
+			  "execution of the instruction failed (err=%d)\n",
+			  ret);
+
+out_do_insn:
+
+	/* Releases the subdevice from its reserved state */
+	clear_bit(A4L_SUBD_BUSY_NR, &subd->status);
+
+	return ret;
+}
+
+int a4l_ioctl_insn(struct a4l_device_context * cxt, void *arg)
+{
+	struct rtdm_fd *fd = rtdm_private_to_fd(cxt);
+	int ret = 0;
+	struct a4l_kernel_instruction insn;
+	struct a4l_device *dev = a4l_get_dev(cxt);
+
+	if (!rtdm_in_rt_context() && rtdm_rt_capable(fd))
+		return -ENOSYS;
+
+	/* Basic checking */
+	if (!test_bit(A4L_DEV_ATTACHED_NR, &dev->flags)) {
+		__a4l_err("a4l_ioctl_insn: unattached device\n");
+		return -EINVAL;
+	}
+
+	/* Recovers the instruction descriptor */
+	ret = a4l_fill_insndsc(cxt, &insn, arg);
+	if (ret != 0)
+		goto err_ioctl_insn;
+
+	/* Performs the instruction */
+	if ((insn.type & A4L_INSN_MASK_SPECIAL) != 0)
+		ret = a4l_do_special_insn(cxt, &insn);
+	else
+		ret = a4l_do_insn(cxt, &insn);
+
+	if (ret < 0)
+		goto err_ioctl_insn;
+
+	/* Frees the used memory and sends back some
+	   data, if need be */
+	ret = a4l_free_insndsc(cxt, &insn);
+
+	return ret;
+
+err_ioctl_insn:
+	a4l_free_insndsc(cxt, &insn);
+	return ret;
+}
+
+int a4l_fill_ilstdsc(struct a4l_device_context * cxt, struct a4l_kernel_instruction_list * dsc, void *arg)
+{
+	struct rtdm_fd *fd = rtdm_private_to_fd(cxt);
+	int i, ret = 0;
+
+	dsc->insns = NULL;
+
+	/* Recovers the structure from user space */
+	ret = rtdm_safe_copy_from_user(fd,
+				       dsc, arg, sizeof(a4l_insnlst_t));
+	if (ret < 0)
+		return ret;
+
+	/* Some basic checking */
+	if (dsc->count == 0) {
+		__a4l_err("a4l_fill_ilstdsc: instruction list's count is 0\n");
+		return -EINVAL;
+	}
+
+	/* Keeps the user pointer in an opaque field */
+	dsc->__uinsns = (a4l_insn_t *)dsc->insns;
+
+	dsc->insns = rtdm_malloc(dsc->count * sizeof(struct a4l_kernel_instruction));
+	if (dsc->insns == NULL)
+		return -ENOMEM;
+
+	/* Recovers the instructions, one by one. This part is not
+	   optimized */
+	for (i = 0; i < dsc->count && ret == 0; i++)
+		ret = a4l_fill_insndsc(cxt,
+				       &(dsc->insns[i]),
+				       &(dsc->__uinsns[i]));
+
+	/* In case of error, frees the allocated memory */
+	if (ret < 0 && dsc->insns != NULL)
+		rtdm_free(dsc->insns);
+
+	return ret;
+}
+
+int a4l_free_ilstdsc(struct a4l_device_context * cxt, struct a4l_kernel_instruction_list * dsc)
+{
+	int i, ret = 0;
+
+	if (dsc->insns != NULL) {
+
+		for (i = 0; i < dsc->count && ret == 0; i++)
+			ret = a4l_free_insndsc(cxt, &(dsc->insns[i]));
+
+		while (i < dsc->count) {
+			a4l_free_insndsc(cxt, &(dsc->insns[i]));
+			i++;
+		}
+
+		rtdm_free(dsc->insns);
+	}
+
+	return ret;
+}
+
+/* This function is not optimized in terms of memory footprint or
+   CPU load; however, the whole Analogy instruction system was not
+   designed with performance as a primary concern */
+int a4l_ioctl_insnlist(struct a4l_device_context * cxt, void *arg)
+{
+	struct rtdm_fd *fd = rtdm_private_to_fd(cxt);
+	int i, ret = 0;
+	struct a4l_kernel_instruction_list ilst;
+	struct a4l_device *dev = a4l_get_dev(cxt);
+
+	if (!rtdm_in_rt_context() && rtdm_rt_capable(fd))
+		return -ENOSYS;
+
+	/* Basic checking */
+	if (!test_bit(A4L_DEV_ATTACHED_NR, &dev->flags)) {
+		__a4l_err("a4l_ioctl_insnlist: unattached device\n");
+		return -EINVAL;
+	}
+
+	if ((ret = a4l_fill_ilstdsc(cxt, &ilst, arg)) < 0)
+		return ret;
+
+	/* Performs the instructions */
+	for (i = 0; i < ilst.count && ret == 0; i++) {
+		if ((ilst.insns[i].type & A4L_INSN_MASK_SPECIAL) != 0)
+			ret = a4l_do_special_insn(cxt, &ilst.insns[i]);
+		else
+			ret = a4l_do_insn(cxt, &ilst.insns[i]);
+	}
+
+	if (ret < 0)
+		goto err_ioctl_ilst;
+
+	return a4l_free_ilstdsc(cxt, &ilst);
+
+err_ioctl_ilst:
+	a4l_free_ilstdsc(cxt, &ilst);
+	return ret;
+}
+++ linux-patched/drivers/xenomai/analogy/driver_facilities.c	2022-03-21 12:58:30.983873418 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/drivers/xenomai/analogy/rtdm_helpers.c	1970-01-01 01:00:00.000000000 +0100
+/*
+ * Analogy for Linux, driver facilities
+ *
+ * Copyright (C) 1997-2000 David A. Schleef <ds@schleef.org>
+ * Copyright (C) 2008 Alexis Berlemont <alexis.berlemont@free.fr>
+ *
+ * Xenomai is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * Xenomai is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Xenomai; if not, write to the Free Software Foundation,
+ * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+#include <linux/module.h>
+#include <linux/fs.h>
+#include <rtdm/analogy/device.h>
+
+/**
+ * @ingroup cobalt
+ * @defgroup analogy Analogy framework
+ * An RTDM-based interface for implementing DAQ card drivers
+ */
+
+/**
+ * @ingroup analogy
+ * @defgroup analogy_driver_facilities Driver API
+ * Programming interface provided to DAQ card drivers
+ */
+
+/* --- Driver section --- */
+
+/**
+ * @ingroup analogy_driver_facilities
+ * @defgroup analogy_driver Driver management services
+ *
+ * Analogy driver registration / unregistration
+ *
+ * In a common Linux char driver, the developer has to register a fops
+ * structure filled with callbacks for read / write / mmap / ioctl
+ * operations.
+ *
+ * Analogy drivers do not have to implement read / write / mmap /
+ * ioctl functions; these procedures are implemented in the Analogy
+ * generic layer, so the transfers between user-space and kernel-space
+ * are already managed. Analogy drivers work with commands and
+ * instructions, which are more specialized kinds of read / write
+ * operations. Instead of registering a fops structure, an Analogy
+ * driver registers an a4l_driver structure.
+ *
+ * @{
+ */
+
+/**
+ * @brief Register an Analogy driver
+ *
+ * After initializing a driver structure, the driver must be made
+ * available so that it can be attached.
+ *
+ * @param[in] drv Driver descriptor structure
+ *
+ * @return 0 on success, otherwise negative error code.
+ *
+ */
+int a4l_register_drv(struct a4l_driver * drv);
+EXPORT_SYMBOL_GPL(a4l_register_drv);
+
+/**
+ * @brief Unregister an Analogy driver
+ *
+ * This function removes the driver descriptor from the Analogy driver
+ * list. The driver cannot be attached anymore.
+ *
+ * @param[in] drv Driver descriptor structure
+ *
+ * @return 0 on success, otherwise negative error code.
+ *
+ */
+int a4l_unregister_drv(struct a4l_driver * drv);
+EXPORT_SYMBOL_GPL(a4l_unregister_drv);
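+
+/*
+ * A minimal registration sketch (illustrative only; the "mydrv" names,
+ * callbacks and private structure are assumptions, see the parport and
+ * 8255 drivers for real examples):
+ *
+ *	static struct a4l_driver mydrv = {
+ *		.owner = THIS_MODULE,
+ *		.board_name = "analogy_mydrv",
+ *		.driver_name = "mydrv",
+ *		.attach = mydrv_attach,
+ *		.detach = mydrv_detach,
+ *		.privdata_size = sizeof(struct mydrv_priv),
+ *	};
+ *
+ *	static int __init mydrv_init(void)
+ *	{
+ *		return a4l_register_drv(&mydrv);
+ *	}
+ *
+ *	static void __exit mydrv_cleanup(void)
+ *	{
+ *		a4l_unregister_drv(&mydrv);
+ *	}
+ */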
+
+/** @} */
+
+/* --- Subdevice section --- */
+
+/**
+ * @ingroup analogy_driver_facilities
+ * @defgroup analogy_subdevice Subdevice management services
+ *
+ * Subdevice declaration in a driver
+ *
+ * The subdevice structure is the most complex one in the Analogy
+ * driver layer. It contains some description fields to fill and some
+ * callbacks to declare.
+ *
+ * The description fields are:
+ * - flags: to define the subdevice type and its capabilities;
+ * - chan_desc: to describe the channels which compose the subdevice;
+ * - rng_desc: to declare the usable ranges;
+ *
+ * The callback functions are:
+ * - do_cmd() and do_cmdtest(): to perform asynchronous acquisitions
+ *   by means of commands;
+ * - cancel(): to abort a running asynchronous acquisition;
+ * - munge(): to apply modifications to the data freshly acquired
+ *   during an asynchronous transfer. Warning: using this feature can
+ *   significantly reduce performance; if the munge operation is
+ *   complex, it will cause a high CPU load, and if the acquisition
+ *   device is DMA capable, many cache misses and cache replacements
+ *   will occur, so the benefits of the DMA controller will vanish;
+ * - trigger(): optionally, to launch an asynchronous acquisition;
+ * - insn_read(), insn_write(), insn_bits(), insn_config(): to perform
+ *   synchronous acquisition operations.
+ *
+ * Once the subdevice is filled, it must be inserted into the driver
+ * structure thanks to a4l_add_subd().
+ *
+ * @{
+ */
+
+EXPORT_SYMBOL_GPL(a4l_range_bipolar10);
+EXPORT_SYMBOL_GPL(a4l_range_bipolar5);
+EXPORT_SYMBOL_GPL(a4l_range_unipolar10);
+EXPORT_SYMBOL_GPL(a4l_range_unipolar5);
+EXPORT_SYMBOL_GPL(a4l_range_unknown);
+EXPORT_SYMBOL_GPL(a4l_range_fake);
+
+/**
+ * @brief Allocate a subdevice descriptor
+ *
+ * This is a helper function for obtaining a suitable subdevice
+ * descriptor.
+ *
+ * @param[in] sizeof_priv Size of the subdevice's private data
+ * @param[in] setup Setup function to be called after the allocation
+ *
+ * @return a pointer to the allocated subdevice descriptor, or NULL if
+ * the allocation failed.
+ *
+ */
+struct a4l_subdevice * a4l_alloc_subd(int sizeof_priv,
+				  void (*setup)(struct a4l_subdevice *));
+EXPORT_SYMBOL_GPL(a4l_alloc_subd);
+
+/**
+ * @brief Add a subdevice to the driver descriptor
+ *
+ * Once the driver descriptor structure is initialized, the function
+ * a4l_add_subd() must be used to add some subdevices to the
+ * driver.
+ *
+ * @param[in] dev Device descriptor structure
+ * @param[in] subd Subdevice descriptor structure
+ *
+ * @return the index with which the subdevice has been registered, in
+ * case of error a negative error code is returned.
+ *
+ */
+int a4l_add_subd(struct a4l_device *dev, struct a4l_subdevice *subd);
+EXPORT_SYMBOL_GPL(a4l_add_subd);
+
+/**
+ * @brief Get a pointer to the subdevice descriptor referenced by its
+ * registration index
+ *
+ * This function is scarcely useful as all the driver callbacks get
+ * the related subdevice descriptor as their first argument.
+ * This function is not optimized, it goes through a linked list to
+ * get the proper pointer. So it must not be used in real-time context
+ * but at initialization / cleanup time (attach / detach).
+ *
+ * @param[in] dev Device descriptor structure
+ * @param[in] idx Subdevice index
+ *
+ * @return a pointer to the subdevice descriptor, or NULL if the index
+ * is out of range.
+ *
+ */
+struct a4l_subdevice *a4l_get_subd(struct a4l_device *dev, int idx);
+EXPORT_SYMBOL_GPL(a4l_get_subd);
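+
+/*
+ * A minimal sketch of subdevice creation at attach time (illustrative
+ * only; setup_dio(), my_chan_desc, my_insn_bits and struct my_spriv
+ * are assumptions, see the parport driver for a real example):
+ *
+ *	static void setup_dio(struct a4l_subdevice *subd)
+ *	{
+ *		subd->flags = A4L_SUBD_DIO;
+ *		subd->chan_desc = &my_chan_desc;
+ *		subd->rng_desc = &range_digital;
+ *		subd->insn_bits = my_insn_bits;
+ *	}
+ *
+ *	subd = a4l_alloc_subd(sizeof(struct my_spriv), setup_dio);
+ *	if (subd == NULL)
+ *		return -ENOMEM;
+ *
+ *	err = a4l_add_subd(dev, subd);
+ *	if (err < 0)
+ *		return err;
+ */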
+
+/** @} */
+
+/* --- Buffer section --- */
+
+/**
+ * @ingroup analogy_driver_facilities
+ * @defgroup analogy_buffer Buffer management services
+ *
+ * Buffer management services
+ *
+ * The buffer is the key component of the Analogy infrastructure. It
+ * manages transfers between the user-space and the Analogy drivers
+ * thanks to generic functions which are described hereafter. Thanks
+ * to the buffer subsystem, the driver developer does not have to care
+ * about the way the user program retrieves or sends data.
+ *
+ * To write a classical char driver, the developer has to fill a fops
+ * structure so as to provide transfer operations to the user program
+ * (read, write, ioctl and mmap if need be).
+ *
+ * The Analogy infrastructure manages the whole interface with the
+ * userspace; the common read, write, mmap, etc. callbacks are generic
+ * Analogy functions. These functions manage (and perform, if need be)
+ * transfers between the user-space and an asynchronous buffer thanks
+ * to lockless mechanisms.
+ *
+ * Consequently, the developer has to use the proper buffer functions
+ * in order to write / read acquired data into / from the asynchronous
+ * buffer.
+ *
+ * Here are listed the functions:
+ * - a4l_buf_prepare_(abs)put() and a4l_buf_commit_(abs)put()
+ * - a4l_buf_prepare_(abs)get() and a4l_buf_commit_(abs)get()
+ * - a4l_buf_put()
+ * - a4l_buf_get()
+ * - a4l_buf_evt().
+ *
+ * The function count might seem high; however, a driver only needs a
+ * few of them. Having so many functions makes it possible to handle
+ * any transfer case:
+ * - If some DMA controller is available, there is no need to make the
+ *   driver copy the acquired data into the asynchronous buffer, the
+ *   DMA controller must directly trigger DMA shots into / from the
+ *   buffer. In that case, a function a4l_buf_prepare_*() must be used
+ *   so as to set up the DMA transfer and a function
+ *   a4l_buf_commit_*() has to be called to complete the transfer().
+ * - For DMA controllers which need to work with global counter (the
+ *   transfered data count since the beginning of the acquisition),
+ *   the functions a4l_buf_*_abs_*() have been made available.
+ * - If no DMA controller is available, the driver has to perform the
+ *   copy between the hardware component and the asynchronous
+ *   buffer. In such cases, the functions a4l_buf_get() and
+ *   a4l_buf_put() are useful (see the sketch below).
+ *
+ * @{
+ */
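+
+/*
+ * A minimal sketch of the non-DMA case (illustrative only; the handler
+ * name and read_hw_sample() are assumptions): an interrupt routine
+ * pushes one freshly acquired sample into the asynchronous buffer and
+ * signals the event, as the 8255 and parport drivers do.
+ *
+ *	static void mydrv_interrupt(struct a4l_subdevice *subd)
+ *	{
+ *		uint16_t sample = read_hw_sample();
+ *
+ *		a4l_buf_put(subd, &sample, sizeof(sample));
+ *		a4l_buf_evt(subd, 0);
+ *	}
+ */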
+
+/**
+ * @brief Update the absolute count of data sent from the device to
+ * the buffer since the start of the acquisition and after the next
+ * DMA shot
+ *
+ * The functions a4l_buf_prepare_(abs)put(),
+ * a4l_buf_commit_(abs)put(), a4l_buf_prepare_(abs)get() and
+ * a4l_buf_commit_(abs)get() have been made available for DMA
+ * transfers. In such situations, no data copy is needed between the
+ * Analogy buffer and the device as some DMA controller is in charge
+ * of performing data shots from / to the Analogy buffer. However, some
+ * pointers still have to be updated so as to monitor the transfers.
+ *
+ * @param[in] subd Subdevice descriptor structure
+ * @param[in] count The data count to be transferred during the next
+ * DMA shot plus the data count which has been copied since the start
+ * of the acquisition
+ *
+ * @return 0 on success, otherwise negative error code.
+ *
+ */
+int a4l_buf_prepare_absput(struct a4l_subdevice *subd, unsigned long count);
+EXPORT_SYMBOL_GPL(a4l_buf_prepare_absput);
+
+/**
+ * @brief Set the absolute count of data which was sent from the
+ * device to the buffer since the start of the acquisition and until
+ * the last DMA shot
+ *
+ * The functions a4l_buf_prepare_(abs)put(),
+ * a4l_buf_commit_(abs)put(), a4l_buf_prepare_(abs)get() and
+ * a4l_buf_commit_(abs)get() have been made available for DMA
+ * transfers. In such situations, no data copy is needed between the
+ * Analogy buffer and the device as some DMA controller is in charge
+ * of performing data shots from / to the Analogy buffer. However,
+ * some pointers still have to be updated so as to monitor the
+ * transfers.
+ *
+ * @param[in] subd Subdevice descriptor structure
+ * @param[in] count The data count transferred to the buffer during
+ * the last DMA shot plus the data count which has been sent /
+ * retrieved since the beginning of the acquisition
+ *
+ * @return 0 on success, otherwise negative error code.
+ *
+ */
+int a4l_buf_commit_absput(struct a4l_subdevice *subd, unsigned long count);
+EXPORT_SYMBOL_GPL(a4l_buf_commit_absput);
+
+/**
+ * @brief Set the count of data which is to be sent to the buffer at
+ * the next DMA shot
+ *
+ * The functions a4l_buf_prepare_(abs)put(),
+ * a4l_buf_commit_(abs)put(), a4l_buf_prepare_(abs)get() and
+ * a4l_buf_commit_(abs)get() have been made available for DMA
+ * transfers. In such situations, no data copy is needed between the
+ * Analogy buffer and the device as some DMA controller is in charge
+ * of performing data shots from / to the Analogy buffer. However,
+ * some pointers still have to be updated so as to monitor the
+ * transfers.
+ *
+ * @param[in] subd Subdevice descriptor structure
+ * @param[in] count The data count to be transferred
+ *
+ * @return 0 on success, otherwise negative error code.
+ *
+ */
+int a4l_buf_prepare_put(struct a4l_subdevice *subd, unsigned long count);
+EXPORT_SYMBOL_GPL(a4l_buf_prepare_put);
+
+/**
+ * @brief Set the count of data sent to the buffer during the last
+ * completed DMA shots
+ *
+ * The functions a4l_buf_prepare_(abs)put(),
+ * a4l_buf_commit_(abs)put(), a4l_buf_prepare_(abs)get() and
+ * a4l_buf_commit_(abs)get() have been made available for DMA
+ * transfers. In such situations, no data copy is needed between the
+ * Analogy buffer and the device as some DMA controller is in charge
+ * of performing data shots from / to the Analogy buffer. However,
+ * some pointers still have to be updated so as to monitor the
+ * transfers.
+ *
+ * @param[in] subd Subdevice descriptor structure
+ * @param[in] count The amount of data transferred
+ *
+ * @return 0 on success, otherwise negative error code.
+ *
+ */
+int a4l_buf_commit_put(struct a4l_subdevice *subd, unsigned long count);
+EXPORT_SYMBOL_GPL(a4l_buf_commit_put);
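+
+/*
+ * Rough sketch of the intended prepare / commit calling pattern for an
+ * input DMA shot (illustrative only; program_dma_shot() and the place
+ * where the completion code runs are assumptions):
+ *
+ *	a4l_buf_prepare_put(subd, shot_size);
+ *	program_dma_shot(...);
+ *
+ *	(later, from the DMA completion handler)
+ *	a4l_buf_commit_put(subd, shot_size);
+ *	a4l_buf_evt(subd, 0);
+ */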
+
+/**
+ * @brief Copy some data from the device driver to the buffer
+ *
+ * The function a4l_buf_put() must copy data coming from some
+ * acquisition device to the Analogy buffer. This ring-buffer is an
+ * intermediate area between the device driver and the user-space
+ * program, which is supposed to recover the acquired data.
+ *
+ * @param[in] subd Subdevice descriptor structure
+ * @param[in] bufdata The data buffer to copy into the Analogy buffer
+ * @param[in] count The amount of data to copy
+ *
+ * @return 0 on success, otherwise negative error code.
+ *
+ */
+int a4l_buf_put(struct a4l_subdevice *subd, void *bufdata, unsigned long count);
+EXPORT_SYMBOL_GPL(a4l_buf_put);
+
+/**
+ * @brief Update the absolute count of data sent from the buffer to
+ * the device since the start of the acquisition and after the next
+ * DMA shot
+ *
+ * The functions a4l_buf_prepare_(abs)put(),
+ * a4l_buf_commit_(abs)put(), a4l_buf_prepare_(abs)get() and
+ * a4l_buf_commit_(abs)get() have been made available for DMA
+ * transfers. In such situations, no data copy is needed between the
+ * Analogy buffer and the device as some DMA controller is in charge
+ * of performing data shots from / to the Analogy buffer. However,
+ * some pointers still have to be updated so as to monitor the
+ * transfers.
+ *
+ * @param[in] subd Subdevice descriptor structure
+ * @param[in] count The data count to be transferred during the next
+ * DMA shot plus the data count which has been copied since the start
+ * of the acquisition
+ *
+ * @return 0 on success, otherwise negative error code.
+ *
+ */
+int a4l_buf_prepare_absget(struct a4l_subdevice *subd, unsigned long count);
+EXPORT_SYMBOL_GPL(a4l_buf_prepare_absget);
+
+/**
+ * @brief Set the absolute count of data which was sent from the
+ * buffer to the device since the start of the acquisition and until
+ * the last DMA shot
+ *
+ * The functions a4l_buf_prepare_(abs)put(),
+ * a4l_buf_commit_(abs)put(), a4l_buf_prepare_(abs)get() and
+ * a4l_buf_commit_(abs)get() have been made available for DMA
+ * transfers. In such situations, no data copy is needed between the
+ * Analogy buffer and the device, since a DMA controller performs the
+ * data shots from / to the Analogy buffer. However, some pointers
+ * still have to be updated so as to monitor the transfers.
+ *
+ * @param[in] subd Subdevice descriptor structure
+ * @param[in] count The data count transferred to the device during
+ * the last DMA shot plus the data count which has been sent since
+ * the beginning of the acquisition
+ *
+ * @return 0 on success, otherwise negative error code.
+ *
+ */
+int a4l_buf_commit_absget(struct a4l_subdevice *subd, unsigned long count);
+EXPORT_SYMBOL_GPL(a4l_buf_commit_absget);
+
+/**
+ * @brief Set the count of data which is to be sent from the buffer to
+ * the device at the next DMA shot
+ *
+ * The functions a4l_buf_prepare_(abs)put(),
+ * a4l_buf_commit_(abs)put(), a4l_buf_prepare_(abs)get() and
+ * a4l_buf_commit_(abs)get() have been made available for DMA
+ * transfers. In such situations, no data copy is needed between the
+ * Analogy buffer and the device, since a DMA controller performs the
+ * data shots from / to the Analogy buffer. However, some pointers
+ * still have to be updated so as to monitor the transfers.
+ *
+ * @param[in] subd Subdevice descriptor structure
+ * @param[in] count The data count to be transferred
+ *
+ * @return 0 on success, otherwise negative error code.
+ *
+ */
+int a4l_buf_prepare_get(struct a4l_subdevice *subd, unsigned long count);
+EXPORT_SYMBOL_GPL(a4l_buf_prepare_get);
+
+/**
+ * @brief Set the count of data sent from the buffer to the device
+ * during the last completed DMA shot
+ *
+ * The functions a4l_buf_prepare_(abs)put(),
+ * a4l_buf_commit_(abs)put(), a4l_buf_prepare_(abs)get() and
+ * a4l_buf_commit_(abs)get() have been made available for DMA
+ * transfers. In such situations, no data copy is needed between the
+ * Analogy buffer and the device, since a DMA controller performs the
+ * data shots from / to the Analogy buffer. However, some pointers
+ * still have to be updated so as to monitor the transfers.
+ *
+ * @param[in] subd Subdevice descriptor structure
+ * @param[in] count The amount of data transferred
+ *
+ * @return 0 on success, otherwise negative error code.
+ *
+ */
+int a4l_buf_commit_get(struct a4l_subdevice *subd, unsigned long count);
+EXPORT_SYMBOL_GPL(a4l_buf_commit_get);
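+
+/*
+ * Illustrative sketch (hypothetical mydev_* names): the output
+ * counterpart of the DMA accounting, pairing a4l_buf_prepare_get()
+ * with a4l_buf_commit_get() around each device-bound shot.
+ */
+static void mydev_output_shot(struct a4l_subdevice *subd,
+			      unsigned long chunk, int completed)
+{
+	if (!completed) {
+		/* About to drain 'chunk' bytes from the Analogy buffer */
+		a4l_buf_prepare_get(subd, chunk);
+		/* ... program the DMA controller here ... */
+	} else {
+		/* Shot done: account for the consumed data and wake up
+		   writers waiting for free space */
+		a4l_buf_commit_get(subd, chunk);
+		a4l_buf_evt(subd, 0);
+	}
+}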
+
+/**
+ * @brief Copy some data from the buffer to the device driver
+ *
+ * The function a4l_buf_get() copies data from the Analogy buffer to
+ * some acquisition device. This ring-buffer is an intermediate area
+ * between the device driver and the user-space program, which is
+ * supposed to provide the data to send to the device.
+ *
+ * @param[in] subd Subdevice descriptor structure
+ * @param[in] bufdata The data buffer to fill with data from the
+ * Analogy buffer
+ * @param[in] count The amount of data to copy
+ *
+ * @return 0 on success, otherwise negative error code.
+ *
+ */
+int a4l_buf_get(struct a4l_subdevice *subd, void *bufdata, unsigned long count);
+EXPORT_SYMBOL_GPL(a4l_buf_get);
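+
+/*
+ * Illustrative sketch (hypothetical mydev_* names): pulling data
+ * queued by user-space out of the Analogy buffer before feeding it
+ * to the device.
+ */
+static int mydev_pull_samples(struct a4l_subdevice *subd,
+			      void *chunk, unsigned long len)
+{
+	int err;
+
+	/* Fetch the next chunk of data to send to the device */
+	err = a4l_buf_get(subd, chunk, len);
+	if (err < 0)
+		return err;
+
+	/* ... write 'chunk' to the device FIFO here ... */
+
+	/* Wake up writers waiting for free space in the buffer */
+	return a4l_buf_evt(subd, 0);
+}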
+
+/**
+ * @brief Signal some event(s) to a user-space program involved in
+ * some read / write operation
+ *
+ * The function a4l_buf_evt() is useful in many cases:
+ * - To wake up a process waiting for some data to read.
+ * - To wake up a process waiting for some data to write.
+ * - To notify the user process that an error has occurred during the
+ *   acquisition.
+ *
+ * @param[in] subd Subdevice descriptor structure
+ * @param[in] evts Some specific event to notify:
+ * - A4L_BUF_ERROR to indicate some error has occurred during the
+ *   transfer
+ * - A4L_BUF_EOA to indicate the acquisition is complete (this
+ *   event is set automatically; it should not be used).
+ *
+ * @return 0 on success, otherwise negative error code.
+ *
+ */
+int a4l_buf_evt(struct a4l_subdevice *subd, unsigned long evts);
+EXPORT_SYMBOL_GPL(a4l_buf_evt);
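+
+/*
+ * Illustrative sketch (hypothetical mydev_* name): reporting a fault
+ * detected by the driver, e.g. a device FIFO overrun, to the
+ * user-space side of the transfer.
+ */
+static void mydev_report_overrun(struct a4l_subdevice *subd)
+{
+	/* Abort the transfer and propagate the error condition */
+	a4l_buf_evt(subd, A4L_BUF_ERROR);
+}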
+
+/**
+ * @brief Get the data amount available in the Analogy buffer
+ *
+ * @param[in] subd Subdevice descriptor structure
+ *
+ * @return the amount of data available in the Analogy buffer.
+ *
+ */
+unsigned long a4l_buf_count(struct a4l_subdevice *subd);
+EXPORT_SYMBOL_GPL(a4l_buf_count);
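+
+/*
+ * Illustrative sketch (hypothetical mydev_* name): bounding the size
+ * of the next device-bound transfer by the amount of data currently
+ * available in the Analogy buffer.
+ */
+static unsigned long mydev_next_chunk(struct a4l_subdevice *subd,
+				      unsigned long max_chunk)
+{
+	unsigned long avail = a4l_buf_count(subd);
+
+	return avail < max_chunk ? avail : max_chunk;
+}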
+
+#ifdef DOXYGEN_CPP		/* Only used for doxygen doc generation */
+
+/**
+ * @brief Get the current Analogy command descriptor
+ *
+ * @param[in] subd Subdevice descriptor structure
+ *
+ * @return the command descriptor.
+ *
+ */
+struct a4l_cmd_desc *a4l_get_cmd(struct a4l_subdevice * subd);
+
+#endif /* DOXYGEN_CPP */
+
+/**
+ * @brief Get the channel index according to its type
+ *
+ * @param[in] subd Subdevice descriptor structure
+ *
+ * @return the channel index.
+ *
+ */
+int a4l_get_chan(struct a4l_subdevice *subd);
+EXPORT_SYMBOL_GPL(a4l_get_chan);
+
+/** @} */
+
+/* --- IRQ handling section --- */
+
+/**
+ * @ingroup analogy_driver_facilities
+ * @defgroup analogy_irq Interrupt management services
+ * @{
+ */
+
+/**
+ * @brief Get the interrupt number in use for a specific device
+ *
+ * @param[in] dev Device descriptor structure
+ *
+ * @return the line number used or A4L_IRQ_UNUSED if no interrupt
+ * is registered.
+ *
+ */
+unsigned int a4l_get_irq(struct a4l_device * dev);
+EXPORT_SYMBOL_GPL(a4l_get_irq);
+
+/**
+ * @brief Register an interrupt handler for a specific device
+ *
+ * @param[in] dev Device descriptor structure
+ * @param[in] irq Line number of the addressed IRQ
+ * @param[in] handler Interrupt handler
+ * @param[in] flags Registration flags:
+ * - RTDM_IRQTYPE_SHARED: enable IRQ-sharing with other drivers
+ *   (Warning: real-time drivers and non-real-time drivers cannot
+ *   share an interrupt line).
+ * - RTDM_IRQTYPE_EDGE: mark IRQ as edge-triggered (Warning: this flag
+ *   is meaningless in an RTDM-less context).
+ * - A4L_IRQ_DISABLED: keep IRQ disabled when calling the action
+ *   handler (Warning: this flag is ignored in an RTDM-enabled
+ *   configuration).
+ * @param[in] cookie Pointer to be passed to the interrupt handler on
+ * invocation
+ *
+ * @return 0 on success, otherwise negative error code.
+ *
+ */
+int a4l_request_irq(struct a4l_device * dev,
+		       unsigned int irq,
+		       a4l_irq_hdlr_t handler,
+		       unsigned long flags, void *cookie);
+EXPORT_SYMBOL_GPL(a4l_request_irq);
+
+/**
+ * @brief Release an interrupt handler for a specific device
+ *
+ * @param[in] dev Device descriptor structure
+ * @param[in] irq Line number of the addressed IRQ
+ *
+ * @return 0 on success, otherwise negative error code.
+ *
+ */
+int a4l_free_irq(struct a4l_device * dev, unsigned int irq);
+EXPORT_SYMBOL_GPL(a4l_free_irq);
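+
+/*
+ * Illustrative sketch (hypothetical mydev_* names): typical interrupt
+ * setup and teardown in a driver's attach / detach handlers. The
+ * handler follows the a4l_irq_hdlr_t convention: returning 0 means
+ * the interrupt was handled.
+ */
+static int mydev_interrupt(unsigned int irq, void *cookie)
+{
+	struct a4l_device *dev = cookie;
+
+	/* ... acknowledge the device and move data here ... */
+	(void)dev;
+
+	return 0;
+}
+
+static int mydev_setup_irq(struct a4l_device *dev, unsigned int irq)
+{
+	/* Register a shared, real-time interrupt handler, passing the
+	   device descriptor as the handler cookie */
+	return a4l_request_irq(dev, irq, mydev_interrupt,
+			       RTDM_IRQTYPE_SHARED, dev);
+}
+
+static void mydev_cleanup_irq(struct a4l_device *dev)
+{
+	unsigned int irq = a4l_get_irq(dev);
+
+	if (irq != A4L_IRQ_UNUSED)
+		a4l_free_irq(dev, irq);
+}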
+
+/** @} */
+
+/* --- Misc section --- */
+
+/**
+ * @ingroup analogy_driver_facilities
+ * @defgroup analogy_misc Misc services
+ * @{
+ */
+
+/**
+ * @brief Get the absolute time in nanoseconds
+ *
+ * @return the absolute time expressed in nanoseconds
+ *
+ */
+unsigned long long a4l_get_time(void);
+EXPORT_SYMBOL_GPL(a4l_get_time);
+
+/** @} */
+++ linux-patched/drivers/xenomai/analogy/rtdm_helpers.c	2022-03-21 12:58:30.975873496 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/drivers/xenomai/analogy/buffer.c	1970-01-01 01:00:00.000000000 +0100
+/*
+ * Analogy for Linux, RTDM helpers
+ *
+ * Copyright (C) 1997-2000 David A. Schleef <ds@schleef.org>
+ * Copyright (C) 2008 Alexis Berlemont <alexis.berlemont@free.fr>
+ *
+ * Xenomai is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * Xenomai is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Xenomai; if not, write to the Free Software Foundation,
+ * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+#include <linux/module.h>
+#include <linux/version.h>
+#include <linux/fs.h>
+#include <asm/atomic.h>
+
+#include <rtdm/analogy/rtdm_helpers.h>
+
+/* --- Time section --- */
+
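+/* Offset between the wall clock and the RTDM clock, recorded at init
+   time so that a4l_get_time() can return wall-clock-aligned absolute
+   timestamps on top of rtdm_clock_read() */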
+static nanosecs_abs_t a4l_clkofs;
+
+void a4l_init_time(void)
+{
+	nanosecs_abs_t t1, t2;
+	t1 = rtdm_clock_read();
+	t2 = ktime_to_ns(ktime_get_real());
+	a4l_clkofs = t2 - t1;
+}
+
+nanosecs_abs_t a4l_get_time(void)
+{
+	return a4l_clkofs + rtdm_clock_read();
+}
+
+/* --- IRQ section --- */
+
+static int a4l_handle_irq(rtdm_irq_t *irq_handle)
+{
+	struct a4l_irq_descriptor *dsc =
+		rtdm_irq_get_arg(irq_handle, struct a4l_irq_descriptor);
+
+	if (dsc->handler((unsigned int)irq_handle->irq, dsc->cookie) == 0)
+		return RTDM_IRQ_HANDLED;
+	else
+		return RTDM_IRQ_NONE;
+}
+
+int __a4l_request_irq(struct a4l_irq_descriptor *dsc,
+		      unsigned int irq,
+		      a4l_irq_hdlr_t handler,
+		      unsigned long flags, void *cookie)
+{
+	/* Fills the IRQ descriptor */
+	dsc->handler = handler;
+	dsc->cookie = cookie;
+	dsc->irq = irq;
+
+	/* Registers the RT IRQ handler */
+	return rtdm_irq_request(&dsc->rtdm_desc,
+				(int)irq,
+				a4l_handle_irq, flags, "Analogy device", dsc);
+}
+
+int __a4l_free_irq(struct a4l_irq_descriptor * dsc)
+{
+	return rtdm_irq_free(&dsc->rtdm_desc);
+}
+
+/* --- Synchronization section --- */
+
+static void a4l_nrt_sync_handler(rtdm_nrtsig_t *nrt_sig, void *arg)
+{
+	struct a4l_sync *snc = (struct a4l_sync *) arg;
+	wake_up_interruptible(&snc->wq);
+}
+
+int a4l_init_sync(struct a4l_sync *snc)
+{
+	int ret = 0;
+
+	/* Initializes the status flags */
+	snc->status = 0;
+
+	/* If the process is NRT, we need a wait queue structure */
+	init_waitqueue_head(&snc->wq);
+
+	/* Initializes the RTDM event */
+	rtdm_event_init(&snc->rtdm_evt, 0);
+
+	/* Initializes the gateway to NRT context */
+	rtdm_nrtsig_init(&snc->nrt_sig, a4l_nrt_sync_handler, snc);
+
+	return ret;
+}
+
+void a4l_cleanup_sync(struct a4l_sync *snc)
+{
+	rtdm_nrtsig_destroy(&snc->nrt_sig);
+	rtdm_event_destroy(&snc->rtdm_evt);
+}
+
+int a4l_wait_sync(struct a4l_sync *snc, int rt)
+{
+	int ret = 0;
+
+	if (test_bit(__EVT_PDING, &snc->status))
+		goto out_wait;
+
+	if (rt != 0) {
+		/* If the calling process is in primary mode,
+		   we can use the RTDM API ... */
+		set_bit(__RT_WAITER, &snc->status);
+		ret = rtdm_event_wait(&snc->rtdm_evt);
+	} else {
+		/* ... else if the process is NRT,
+		   the Linux wait queue system is used */
+		set_bit(__NRT_WAITER, &snc->status);
+		ret = wait_event_interruptible(snc->wq,
+					       test_bit(__EVT_PDING,
+							&snc->status));
+	}
+
+out_wait:
+
+	clear_bit(__EVT_PDING, &snc->status);
+
+	return ret;
+}
+
+int a4l_timedwait_sync(struct a4l_sync * snc,
+		       int rt, unsigned long long ns_timeout)
+{
+	int ret = 0;
+	unsigned long timeout;
+
+	if (test_bit(__EVT_PDING, &snc->status))
+		goto out_wait;
+
+	if (rt != 0) {
+		/* If the calling process is in primary mode,
+		   we can use the RTDM API ... */
+		set_bit(__RT_WAITER, &snc->status);
+		ret = rtdm_event_timedwait(&snc->rtdm_evt, ns_timeout, NULL);
+	} else {
+		/* ... else if the process is NRT,
+		   the Linux wait queue system is used */
+
+		/* Convert the delay from nanoseconds to microseconds;
+		   do_div() leaves the quotient in ns_timeout */
+		do_div(ns_timeout, 1000);
+		timeout = (unsigned long)ns_timeout;
+
+		/* We consider the Linux kernel cannot tick at a frequency
+		   higher than 1 MHz; if the timeout value is lower than
+		   1us, we round it up to one jiffy */
+		timeout = (timeout == 0) ? 1 : usecs_to_jiffies(timeout);
+
+		set_bit(__NRT_WAITER, &snc->status);
+
+		ret = wait_event_interruptible_timeout(snc->wq,
+						       test_bit(__EVT_PDING,
+								&snc->status),
+						       timeout);
+	}
+
+out_wait:
+
+	clear_bit(__EVT_PDING, &snc->status);
+
+	return ret;
+}
+
+void a4l_flush_sync(struct a4l_sync * snc)
+{
+	/* Clear the status bitfield */
+	snc->status = 0;
+
+	/* Flush the RTDM event */
+	rtdm_event_clear(&snc->rtdm_evt);
+}
+
+void a4l_signal_sync(struct a4l_sync * snc)
+{
+	int hit = 0;
+
+	set_bit(__EVT_PDING, &snc->status);
+
+	/* a4l_signal_sync() may not be called from the waiter's
+	   context; the status flags record how the waiter went to
+	   sleep, so the matching signaling primitive can be used */
+	if (test_and_clear_bit(__RT_WAITER, &snc->status)) {
+		rtdm_event_signal(&snc->rtdm_evt);
+		hit++;
+	}
+
+	if (test_and_clear_bit(__NRT_WAITER, &snc->status)) {
+		rtdm_nrtsig_pend(&snc->nrt_sig);
+		hit++;
+	}
+
+	if (hit == 0) {
+		/* No waiter mode recorded yet (e.g. first signaling),
+		   so trigger both notification paths */
+		rtdm_event_signal(&snc->rtdm_evt);
+		rtdm_nrtsig_pend(&snc->nrt_sig);
+	}
+}
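+
+/*
+ * Illustrative usage sketch of the synchronization helpers above
+ * (the example_* names are hypothetical): a consumer blocks on the
+ * event in either primary (rt != 0) or secondary mode, while a
+ * producer running in any context signals it.
+ */
+static int example_wait_for_data(struct a4l_sync *snc, int rt)
+{
+	/* Sleep until a4l_signal_sync() is called by the producer */
+	return a4l_wait_sync(snc, rt);
+}
+
+static void example_data_ready(struct a4l_sync *snc)
+{
+	/* Wake up the waiter through the path matching its mode */
+	a4l_signal_sync(snc);
+}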
+++ linux-patched/drivers/xenomai/analogy/buffer.c	2022-03-21 12:58:30.968873564 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/drivers/xenomai/analogy/rtdm_interface.c	1970-01-01 01:00:00.000000000 +0100
+/*
+ * Analogy for Linux, buffer related features
+ *
+ * Copyright (C) 1997-2000 David A. Schleef <ds@schleef.org>
+ * Copyright (C) 2008 Alexis Berlemont <alexis.berlemont@free.fr>
+ *
+ * Xenomai is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * Xenomai is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Xenomai; if not, write to the Free Software Foundation,
+ * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+#include <linux/module.h>
+#include <linux/fs.h>
+#include <linux/mman.h>
+#include <linux/vmalloc.h>
+#include <asm/errno.h>
+#include <asm/io.h>
+#include <rtdm/analogy/device.h>
+
+/* --- Initialization functions (init, alloc, free) --- */
+
+/* The buffer characteristics are very close to the Comedi ones: the
+   buffer is allocated with vmalloc() and the physical addresses of the
+   pages which compose the virtual buffer are held in a table */
+
+void a4l_free_buffer(struct a4l_buffer * buf_desc)
+{
+	__a4l_dbg(1, core_dbg, "buf=%p buf->buf=%p\n", buf_desc, buf_desc->buf);
+
+	if (buf_desc->pg_list != NULL) {
+		rtdm_free(buf_desc->pg_list);
+		buf_desc->pg_list = NULL;
+	}
+
+	if (buf_desc->buf != NULL) {
+		char *vaddr, *vabase = buf_desc->buf;
+		for (vaddr = vabase; vaddr < vabase + buf_desc->size;
+		     vaddr += PAGE_SIZE)
+			ClearPageReserved(vmalloc_to_page(vaddr));
+		vfree(buf_desc->buf);
+		buf_desc->buf = NULL;
+	}
+}
+
+int a4l_alloc_buffer(struct a4l_buffer *buf_desc, int buf_size)
+{
+	int ret = 0;
+	char *vaddr, *vabase;
+
+	buf_desc->size = buf_size;
+	buf_desc->size = PAGE_ALIGN(buf_desc->size);
+
+	buf_desc->buf = vmalloc_32(buf_desc->size);
+	if (buf_desc->buf == NULL) {
+		ret = -ENOMEM;
+		goto out_virt_contig_alloc;
+	}
+
+	vabase = buf_desc->buf;
+
+	for (vaddr = vabase; vaddr < vabase + buf_desc->size;
+	     vaddr += PAGE_SIZE)
+		SetPageReserved(vmalloc_to_page(vaddr));
+
+	buf_desc->pg_list = rtdm_malloc(((buf_desc->size) >> PAGE_SHIFT) *
+					sizeof(unsigned long));
+	if (buf_desc->pg_list == NULL) {
+		ret = -ENOMEM;
+		goto out_virt_contig_alloc;
+	}
+
+	for (vaddr = vabase; vaddr < vabase + buf_desc->size;
+	     vaddr += PAGE_SIZE)
+		buf_desc->pg_list[(vaddr - vabase) >> PAGE_SHIFT] =
+			(unsigned long) page_to_phys(vmalloc_to_page(vaddr));
+
+	__a4l_dbg(1, core_dbg, "buf=%p buf->buf=%p\n", buf_desc, buf_desc->buf);
+
+out_virt_contig_alloc:
+	if (ret != 0)
+		a4l_free_buffer(buf_desc);
+
+	return ret;
+}
+
+static void a4l_reinit_buffer(struct a4l_buffer *buf_desc)
+{
+	/* No command to process yet */
+	buf_desc->cur_cmd = NULL;
+
+	/* No more (or not yet) linked with a subdevice */
+	buf_desc->subd = NULL;
+
+	/* Initializes counts and flags */
+	buf_desc->end_count = 0;
+	buf_desc->prd_count = 0;
+	buf_desc->cns_count = 0;
+	buf_desc->tmp_count = 0;
+	buf_desc->mng_count = 0;
+
+	/* Flush pending events */
+	buf_desc->flags = 0;
+	a4l_flush_sync(&buf_desc->sync);
+}
+
+void a4l_init_buffer(struct a4l_buffer *buf_desc)
+{
+	memset(buf_desc, 0, sizeof(struct a4l_buffer));
+	a4l_init_sync(&buf_desc->sync);
+	a4l_reinit_buffer(buf_desc);
+}
+
+void a4l_cleanup_buffer(struct a4l_buffer *buf_desc)
+{
+	a4l_cleanup_sync(&buf_desc->sync);
+}
+
+int a4l_setup_buffer(struct a4l_device_context *cxt, struct a4l_cmd_desc *cmd)
+{
+	struct a4l_buffer *buf_desc = cxt->buffer;
+	int i;
+
+	/* Retrieve the related subdevice */
+	buf_desc->subd = a4l_get_subd(cxt->dev, cmd->idx_subd);
+	if (buf_desc->subd == NULL) {
+		__a4l_err("a4l_setup_buffer: subdevice index "
+			  "out of range (%d)\n", cmd->idx_subd);
+		return -EINVAL;
+	}
+
+	if (test_and_set_bit(A4L_SUBD_BUSY_NR, &buf_desc->subd->status)) {
+		__a4l_err("a4l_setup_buffer: subdevice %d already busy\n",
+			  cmd->idx_subd);
+		return -EBUSY;
+	}
+
+	/* Checks if the transfer system has to work in bulk mode */
+	if (cmd->flags & A4L_CMD_BULK)
+		set_bit(A4L_BUF_BULK_NR, &buf_desc->flags);
+
+	/* Sets the working command */
+	buf_desc->cur_cmd = cmd;
+
+	/* Link the subdevice with the context's buffer */
+	buf_desc->subd->buf = buf_desc;
+
+	/* Compute