diff --git a/MPSoC/Makefile.forsoclib b/MPSoC/Makefile.forsoclib
index 75856ec68272976a1045b9761f0125309b033fc8..ff16e546613a8578c74672131880a8f9f4f08fac 100755
--- a/MPSoC/Makefile.forsoclib
+++ b/MPSoC/Makefile.forsoclib
@@ -16,7 +16,7 @@ export PATH
 updateruntime:
 	cp src/*.c mutekh/libavatar/
 	cp src/*.h mutekh/libavatar/include/  
-	cp src/Makefile mutekh/libavatar/
+#	cp src/Makefile mutekh/libavatar/
 	
 updategeneratedcode:
 	cp src/defs.h mutekh/examples/avatar
diff --git a/MPSoC/mutekh/examples/avatar/mwmr.h b/MPSoC/mutekh/examples/avatar/mwmr.h
new file mode 100644
index 0000000000000000000000000000000000000000..3adc2add0ef574ca409a221b0bcf3f79196e4867
--- /dev/null
+++ b/MPSoC/mutekh/examples/avatar/mwmr.h
@@ -0,0 +1,306 @@
+/*
+ * This file is distributed under the terms of the GNU General Public
+ * License.
+ * 
+ * Copyright (c) UPMC / Lip6
+ *     2005-2008, Nicolas Pouillon, <nipo@ssji.net>
+ */
+
+#ifndef MWMR_H_
+#define MWMR_H_
+
+/**
+   @file
+   @module{MWMR}
+   @short MWMR channels access
+ */
+
+/**
+   @this is an abstract MWMR channel structure.
+ */
+struct mwmr_s;
+
+#if defined(CONFIG_MWMR_PTHREAD)
+
+# include <pthread.h>
+
+/** @hidden */
+struct mwmr_status_s
+{
+	pthread_mutex_t lock;
+	pthread_cond_t nempty;
+	pthread_cond_t nfull;
+	size_t rptr, wptr;
+	size_t usage;
+};
+
+/** @hidden */
+struct mwmr_s {
+	struct mwmr_status_s *status;
+	size_t width;
+	size_t depth;
+	size_t gdepth;
+	uint8_t *buffer;
+	const char *const name;
+};
+
+/** @hidden */
+typedef struct {} srl_mwmr_lock_t;
+/** @hidden */
+# define MWMR_LOCK_INITIALIZER {}
+
+/** @hidden */
+# define MWMR_STATUS_INITIALIZER(x,y)							\
+	{															\
+		.lock = PTHREAD_MUTEX_INITIALIZER,						\
+		.nempty = PTHREAD_COND_INITIALIZER,						\
+		.nfull = PTHREAD_COND_INITIALIZER,						\
+		.rptr = 0,      										\
+		.wptr = 0,      										\
+		.usage = 0,												\
+	}
+
+/** @hidden */
+# define MWMR_INITIALIZER(w, d, b, st, n, l)					   \
+	{														   \
+		.width = w,											   \
+		.depth = d,											   \
+		.gdepth = (w)*(d),									   \
+		.status = st,									   	   \
+		.buffer = (void*)b,									   \
+		.name = n,											   \
+	}
+
+#elif defined CONFIG_MWMR_SOCLIB
+
+# ifdef CONFIG_MWMR_LOCKFREE
+
+/** @hidden */
+enum SoclibMwmrRegisters {
+    MWMR_IOREG_MAX = 16,
+    MWMR_RESET = MWMR_IOREG_MAX,
+    MWMR_CONFIG_FIFO_WAY,
+    MWMR_CONFIG_FIFO_NO,
+    MWMR_CONFIG_STATUS_ADDR,
+    MWMR_CONFIG_DEPTH, // bytes
+    MWMR_CONFIG_BUFFER_ADDR,
+    MWMR_CONFIG_RUNNING,
+    MWMR_CONFIG_WIDTH, // bytes
+    MWMR_CONFIG_ENDIANNESS, // Write 0x11223344 here
+    MWMR_FIFO_FILL_STATUS,
+};
+
+/** @hidden */
+enum SoclibMwmrWay {
+    MWMR_TO_COPROC,
+    MWMR_FROM_COPROC,
+};
+
+/** @hidden */
+struct mwmr_status_s
+{
+	uint32_t free_tail; // bytes
+	uint32_t free_head; // bytes
+	uint32_t free_size; // bytes
+
+	uint32_t data_tail; // bytes
+	uint32_t data_head; // bytes
+	uint32_t data_size; // bytes
+};
+
+/** @hidden */
+#  define MWMR_STATUS_INITIALIZER(w, d) {0,0,(w*d),0,0,0}
+
+# else /* not CONFIG_MWMR_LOCKFREE */
+
+/** @hidden */
+enum SoclibMwmrRegisters {
+    MWMR_IOREG_MAX = 16,
+    MWMR_RESET = MWMR_IOREG_MAX,
+    MWMR_CONFIG_FIFO_WAY,
+    MWMR_CONFIG_FIFO_NO,
+    MWMR_CONFIG_STATUS_ADDR,
+    MWMR_CONFIG_DEPTH,
+    MWMR_CONFIG_BUFFER_ADDR,
+    MWMR_CONFIG_LOCK_ADDR,
+    MWMR_CONFIG_RUNNING,
+    MWMR_CONFIG_WIDTH,
+    MWMR_FIFO_FILL_STATUS,
+};
+
+/** @hidden */
+struct mwmr_status_s
+{
+	uint32_t rptr;
+	uint32_t wptr;
+	uint32_t usage;
+	uint32_t lock;
+};
+
+/** @hidden */
+#  define MWMR_STATUS_INITIALIZER(w,d) {0,0,0,0}
+
+# endif /* CONFIG_MWMR_LOCKFREE */
+
+# ifdef CONFIG_MWMR_USE_RAMLOCKS
+/** @hidden */
+#  define MWMR_USE_SEPARATE_LOCKS
+/** @hidden */
+typedef uint32_t srl_mwmr_lock_t;
+/** @hidden */
+#  define MWMR_LOCK_INITIALIZER 0
+# endif
+
+/** @hidden */
+struct mwmr_s {
+	size_t width;
+	size_t depth;
+	size_t gdepth;
+	void *buffer;
+	struct mwmr_status_s *status;
+	const char *const name;
+# ifdef CONFIG_MWMR_INSTRUMENTATION
+	uint32_t n_read;
+	uint32_t n_write;
+	uint32_t time_read;
+	uint32_t time_write;
+# endif
+# ifdef CONFIG_MWMR_USE_RAMLOCKS
+/** @hidden */
+	srl_mwmr_lock_t *lock;
+# endif
+};
+
+# ifdef CONFIG_MWMR_USE_RAMLOCKS
+
+/** @hidden */
+#  define MWMR_INITIALIZER(w, d, b, st, n, l)				   \
+	{														   \
+		.width = w,											   \
+		.depth = d,											   \
+		.gdepth = (w)*(d),									   \
+		.buffer = (void*)b,									   \
+		.status = st,									   	   \
+		.name = n,											   \
+		.lock = l,											   \
+	}
+# else
+
+/** @hidden */
+typedef struct {} srl_mwmr_lock_t;
+/** @hidden */
+#  define MWMR_LOCK_INITIALIZER {}
+
+/** @hidden */
+#  define MWMR_INITIALIZER(w, d, b, st, n, l)				   \
+	{														   \
+		.width = w,											   \
+		.depth = d,											   \
+		.gdepth = (w)*(d),									   \
+		.buffer = (void*)b,									   \
+		.status = st,									   	   \
+		.name = n,											   \
+	}
+# endif
+
+#else
+# error No valid MWMR implementation
+#endif
+
+#ifdef CONFIG_MWMR_INSTRUMENTATION
+
+/**
+   @this dumps statistics about usage of the channel to current
+   console.
+
+   @param channel The channel
+*/
+void mwmr_dump_stats( const struct mwmr_s *channel );
+
+/**
+   @this resets statistics about usage of the channel.
+
+   @param channel The channel
+*/
+void mwmr_clear_stats( struct mwmr_s *channel );
+#endif
+
+/**
+   @this is the way of the channel designated when configuring a
+   hardware MWMR controller
+ */
+enum SoclibMwmrWay {
+    MWMR_TO_COPROC,
+    MWMR_FROM_COPROC,
+};
+
+/**
+   @this initializes a hardware MWMR controller for usage of a given
+   channel. Controller starts to use channel as soon as configured.
+
+   @param coproc Base address of controller
+   @param way Way of the channel
+   @param no Number of the channel. Channels are numbered from 0 in
+             each way.
+   @param mwmr Channel to use from coprocessor
+ */
+void mwmr_hw_init( void *coproc, enum SoclibMwmrWay way,
+				   size_t no, const struct mwmr_s* mwmr );
+
+/**
+   @this resets the channel's internal state. All data inside it will
+   be lost.
+
+   @param channel The channel to reset
+ */
+void mwmr_init( struct mwmr_s *channel );
+
+/**
+   @this reads a given size from a mwmr channel. If the size is not
+   available in the channel, the read will block.
+
+   @param channel The mwmr channel
+   @param buffer The buffer to retrieve data into
+   @param size The size (in bytes) of the requested transfer. This has
+   to be a multiple of the channel's width.
+ */
+void mwmr_read( struct mwmr_s *channel, void *buffer, size_t size );
+
+/**
+   @this writes a given size to a mwmr channel. If the size is not
+   free in the channel, the write will block.
+
+   @param channel The mwmr channel
+   @param buffer The buffer to retrieve data from
+   @param size The size (in bytes) of the requested transfer. This has
+   to be a multiple of the channel's width.
+ */
+void mwmr_write( struct mwmr_s *channel, const void *buffer, size_t size );
+
+/**
+   @this reads a given size from a mwmr channel. If the size is not
+   available in the channel, or if the lock is not available, @this will
+   return without transferring the whole buffer.
+
+   @param channel The mwmr channel
+   @param buffer The buffer to retrieve data into
+   @param size The size (in bytes) of the requested transfer. This has
+   to be a multiple of the channel's width.
+   @return the number of bytes actually transferred
+ */
+size_t mwmr_try_read( struct mwmr_s *channel, void *buffer, size_t size );
+
+/**
+   @this writes a given size to a mwmr channel. If the size is not
+   free in the channel, or if the lock is not available, @this will
+   return without transferring the whole buffer.
+
+   @param channel The mwmr channel
+   @param buffer The buffer to retrieve data from
+   @param size The size (in bytes) of the requested transfer. This has
+   to be a multiple of the channel's width.
+   @return the number of bytes actually transferred
+ */
+size_t mwmr_try_write( struct mwmr_s *channel, const void *buffer, size_t size );
+
+#endif
diff --git a/MPSoC/mutekh/examples/avatar/mwmr_pthread.c b/MPSoC/mutekh/examples/avatar/mwmr_pthread.c
new file mode 100644
index 0000000000000000000000000000000000000000..77af37926f11b57eb6307be25c057108fafde3bd
--- /dev/null
+++ b/MPSoC/mutekh/examples/avatar/mwmr_pthread.c
@@ -0,0 +1,155 @@
+/*
+ * This file is distributed under the terms of the GNU General Public
+ * License.
+ * 
+ * Copyright (c) UPMC / Lip6
+ *     2005-2008, Nicolas Pouillon, <nipo@ssji.net>
+ */
+#include <pthread.h>
+#include <string.h>
+#include <assert.h>
+
+#include <hexo/types.h>
+#include <mwmr/mwmr.h>
+
+void mwmr_read( struct mwmr_s *fifo, void *mem, size_t len )
+{
+	struct mwmr_status_s *state = fifo->status;
+    size_t got = 0;
+    uint8_t *ptr = (uint8_t *)mem;
+
+//  mutek_instrument_trace(0);
+
+    assert ( len % fifo->width == 0 );
+
+    pthread_mutex_lock( &(state->lock) );
+    while ( got < len ) {
+        while ( ! state->usage ) {
+            pthread_cond_wait( &(state->nempty), &(state->lock) );
+        }
+        memcpy( ptr, fifo->buffer + state->rptr, fifo->width );
+        state->rptr += fifo->width;
+        if ( state->rptr == fifo->gdepth )
+            state->rptr = 0;
+        state->usage -= fifo->width;
+		assert( state->rptr < fifo->gdepth );
+		assert( state->usage <= fifo->gdepth );
+        pthread_cond_signal( &(state->nfull) );
+        got += fifo->width;
+        ptr += fifo->width;
+    }
+    pthread_mutex_unlock( &(state->lock) );
+    pthread_cond_signal( &(state->nfull) );
+    pthread_yield();
+
+//  mutek_instrument_trace(1);
+}
+
+void mwmr_write( struct mwmr_s *fifo, const void *mem, size_t len )
+{
+	struct mwmr_status_s *state = fifo->status;
+    size_t put = 0;
+    uint8_t *ptr = (uint8_t *)mem;
+
+//  mutek_instrument_trace(0);
+
+    assert ( len % fifo->width == 0 );
+
+    pthread_mutex_lock( &(state->lock) );
+    while ( put < len ) {
+        while ( state->usage == fifo->gdepth ) {
+            pthread_cond_wait( &(state->nfull), &(state->lock) );
+        }
+        memcpy( fifo->buffer + state->wptr, ptr, fifo->width );
+        state->wptr += fifo->width;
+        if ( state->wptr == fifo->gdepth )
+            state->wptr = 0;
+        state->usage += fifo->width;
+		assert( state->wptr < fifo->gdepth );
+		assert( state->usage <= fifo->gdepth );
+        pthread_cond_signal( &(state->nempty) );
+        put += fifo->width;
+        ptr += fifo->width;
+    }
+    pthread_mutex_unlock( &(state->lock) );
+    pthread_cond_signal( &(state->nempty) );
+
+    pthread_yield();
+//	mutek_instrument_trace(1);
+}
+
+size_t mwmr_try_read( struct mwmr_s *fifo, void *mem, size_t len )
+{
+	struct mwmr_status_s *state = fifo->status;
+    size_t got = 0;
+    uint8_t *ptr = (uint8_t *)mem;
+
+    assert ( len % fifo->width == 0 );
+
+    if ( pthread_mutex_trylock( &(state->lock) ) ) {
+        return 0;
+    }
+    
+    while ( got < len ) {
+        if ( ! state->usage ) {
+            pthread_mutex_unlock( &(state->lock) );
+            pthread_cond_signal( &(state->nfull) );
+            return got;
+        }
+        memcpy( ptr, fifo->buffer + state->rptr, fifo->width );
+        state->rptr += fifo->width;
+        if ( state->rptr == fifo->gdepth )
+            state->rptr = 0;
+        state->usage -= fifo->width;
+        got += fifo->width;
+        ptr += fifo->width;
+    }
+    pthread_mutex_unlock( &(state->lock) );
+    pthread_cond_signal( &(state->nfull) );
+
+    pthread_yield();
+
+    return got;
+}
+
+size_t mwmr_try_write( struct mwmr_s *fifo, const void *mem, size_t len )
+{
+	struct mwmr_status_s *state = fifo->status;
+    size_t put = 0;
+    uint8_t *ptr = (uint8_t *)mem;
+
+    assert( len % fifo->width == 0 );
+
+    if ( pthread_mutex_trylock( &(state->lock) ) ) {
+        return 0;
+    }
+
+    while ( put < len ) {
+        if ( state->usage == fifo->gdepth ) {
+            pthread_mutex_unlock( &(state->lock) );
+            pthread_cond_signal( &(state->nempty) );
+            return put;
+        }
+        memcpy( fifo->buffer + state->wptr, ptr, fifo->width );
+        state->wptr += fifo->width;
+        if ( state->wptr == fifo->gdepth )
+            state->wptr = 0;
+        state->usage += fifo->width;
+        put += fifo->width;
+        ptr += fifo->width;
+    }
+    pthread_mutex_unlock( &(state->lock) );
+    pthread_cond_signal( &(state->nempty) );
+
+    pthread_yield();
+
+    return put;
+}
+
+void mwmr_init( struct mwmr_s *fifo )
+{
+	struct mwmr_status_s *state = fifo->status;
+	pthread_cond_init(&state->nempty, NULL);
+	pthread_cond_init(&state->nfull, NULL);
+	pthread_mutex_init(&state->lock, NULL);
+}
diff --git a/MPSoC/mutekh/examples/avatar/mwmr_soclib.c b/MPSoC/mutekh/examples/avatar/mwmr_soclib.c
new file mode 100644
index 0000000000000000000000000000000000000000..43a31fc5407edbee8a28bb6a57ba63553b2c48e6
--- /dev/null
+++ b/MPSoC/mutekh/examples/avatar/mwmr_soclib.c
@@ -0,0 +1,377 @@
+/*
+ * This file is part of MutekH.
+ * 
+ * MutekH is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU Lesser General Public License as published
+ * by the Free Software Foundation; version 2.1 of the License.
+ * 
+ * MutekH is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ * 
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with MutekH; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301 USA
+ *
+ * Copyright (c) UPMC, Lip6, SoC
+ *         Nicolas Pouillon <nipo@ssji.net>, 2008
+ */
+
+#include <mutek/scheduler.h>
+#include <hexo/types.h>
+#include <hexo/atomic.h>
+#include <hexo/iospace.h>
+#include <hexo/endian.h>
+#include <hexo/interrupt.h>
+#include <string.h>
+#include <mwmr/mwmr.h>
+
+#if defined(CONFIG_SRL) && !defined(CONFIG_PTHREAD)
+# include <srl/srl_sched_wait.h>
+# include <srl/srl_log.h>
+# ifndef SRL_VERBOSITY
+#  define SRL_VERBOSITY VERB_DEBUG
+# endif
+#elif defined(CONFIG_PTHREAD)
+# include <pthread.h>
+#endif
+
+static inline size_t min(size_t a, size_t b)
+{
+	if ( a < b )
+		return a;
+	else
+		return b;
+}
+
+void
+mwmr_hw_init( void *coproc, enum SoclibMwmrWay way,
+			  size_t no, const struct mwmr_s* mwmr )
+{
+	uintptr_t c = (uintptr_t)coproc;
+	cpu_mem_write_32( c + sizeof(uint32_t) * MWMR_CONFIG_FIFO_WAY, endian_le32(way));
+	cpu_mem_write_32( c + sizeof(uint32_t) * MWMR_CONFIG_FIFO_NO, endian_le32(no));
+	cpu_mem_write_32( c + sizeof(uint32_t) * MWMR_CONFIG_STATUS_ADDR, endian_le32((uintptr_t)mwmr->status));
+	cpu_mem_write_32( c + sizeof(uint32_t) * MWMR_CONFIG_DEPTH, endian_le32(mwmr->gdepth));
+	cpu_mem_write_32( c + sizeof(uint32_t) * MWMR_CONFIG_BUFFER_ADDR, endian_le32((uintptr_t)mwmr->buffer));
+	cpu_mem_write_32( c + sizeof(uint32_t) * MWMR_CONFIG_WIDTH, endian_le32((uintptr_t)mwmr->width));
+#ifdef CONFIG_MWMR_USE_RAMLOCKS
+	cpu_mem_write_32( c + sizeof(uint32_t) * MWMR_CONFIG_LOCK_ADDR, endian_le32((uintptr_t)mwmr->lock));
+#endif
+	cpu_mem_write_32( c + sizeof(uint32_t) * MWMR_CONFIG_RUNNING, endian_le32(1));
+}
+
+void mwmr_config( void *coproc, size_t no, const uint32_t val )
+{
+	uintptr_t c = (uintptr_t)coproc;
+	cpu_mem_write_32( c + sizeof(uint32_t) * no, val);
+}
+
+uint32_t mwmr_status( void *coproc, size_t no )
+{
+	uintptr_t c = (uintptr_t)coproc;
+	return cpu_mem_read_32( c + sizeof(uint32_t) * no );
+}
+
+static inline void mwmr_lock( struct mwmr_s *fifo )
+{
+#ifdef CONFIG_MWMR_USE_RAMLOCKS
+	while (*((uint32_t *)fifo->lock) != 0) {
+# if defined(CONFIG_PTHREAD)
+		pthread_yield();
+# else
+		cpu_interrupt_disable();
+		sched_context_switch();
+		cpu_interrupt_enable();
+# endif
+	}
+#else
+# if defined(CONFIG_SRL) && !defined(CONFIG_PTHREAD)
+	while (cpu_atomic_bit_testset((atomic_int_t*)&fifo->status->lock, 0)) {
+/* 		cpu_interrupt_disable(); */
+/* 		sched_context_switch(); */
+/* 		cpu_interrupt_enable(); */
+		srl_sched_wait_eq_le(&fifo->status->lock, 0);
+	}
+# elif defined(CONFIG_PTHREAD)
+	while (cpu_atomic_bit_testset((atomic_int_t*)&fifo->status->lock, 0)) {
+		pthread_yield();
+	}
+# else
+	cpu_atomic_bit_waitset((atomic_int_t*)&fifo->status->lock, 0);
+# endif
+#endif
+}
+
+static inline uint32_t mwmr_try_lock( struct mwmr_s *fifo )
+{
+#ifdef CONFIG_MWMR_USE_RAMLOCKS
+	return !!cpu_mem_read_32((uintptr_t)fifo->lock);
+#else
+	return cpu_atomic_bit_testset((atomic_int_t*)&fifo->status->lock, 0);
+#endif
+}
+
+static inline void mwmr_unlock( struct mwmr_s *fifo )
+{
+#ifdef CONFIG_MWMR_USE_RAMLOCKS
+	cpu_mem_write_32((uintptr_t)fifo->lock, 0);
+#else
+    cpu_mem_write_32((uintptr_t)&fifo->status->lock, 0);
+#endif
+}
+
+typedef struct {
+	uint32_t usage, wptr, rptr, modified;
+} local_mwmr_status_t;
+
+static inline void rehash_status( struct mwmr_s *fifo, local_mwmr_status_t *status )
+{
+	struct mwmr_status_s *fstatus = fifo->status;
+	cpu_dcache_invld_buf((void*)fstatus, sizeof(*fstatus));
+	status->usage = endian_le32(cpu_mem_read_32( (uintptr_t)&fstatus->usage ));
+    status->wptr =  endian_le32(cpu_mem_read_32( (uintptr_t)&fstatus->wptr ));
+    status->rptr =  endian_le32(cpu_mem_read_32( (uintptr_t)&fstatus->rptr ));
+	status->modified = 0;
+//	srl_log_printf(NONE,"%s %d %d %d/%d\n", fifo->name, status->rptr, status->wptr, status->usage, fifo->gdepth);
+}
+
+static inline void writeback_status( struct mwmr_s *fifo, local_mwmr_status_t *status )
+{
+    struct mwmr_status_s *fstatus = fifo->status;
+	if ( !status->modified )
+		return;
+	cpu_mem_write_32( (uintptr_t)&fstatus->usage, endian_le32(status->usage) );
+    cpu_mem_write_32( (uintptr_t)&fstatus->wptr, endian_le32(status->wptr) );
+	cpu_mem_write_32( (uintptr_t)&fstatus->rptr, endian_le32(status->rptr) );
+}
+
+void mwmr_read( struct mwmr_s *fifo, void *_ptr, size_t lensw )
+{
+	uint8_t *ptr = _ptr;
+	local_mwmr_status_t status;
+
+#ifdef CONFIG_MWMR_INSTRUMENTATION
+	size_t tot = lensw/fifo->width;
+	uint32_t access_begin = cpu_cycle_count();
+#endif
+
+	mwmr_lock( fifo );
+	rehash_status( fifo, &status );
+    while ( lensw ) {
+        size_t len;
+		while ( status.usage < fifo->width ) {
+			writeback_status( fifo, &status );
+            mwmr_unlock( fifo );
+#if defined(CONFIG_SRL) && !defined(CONFIG_PTHREAD)
+			srl_sched_wait_ge_le(&fifo->status->usage, fifo->width);
+#elif defined(CONFIG_PTHREAD)
+			pthread_yield();
+#else
+			cpu_interrupt_disable();
+			sched_context_switch();
+			cpu_interrupt_enable();
+#endif
+            mwmr_lock( fifo );
+			rehash_status( fifo, &status );
+        }
+        while ( lensw && status.usage >= fifo->width ) {
+			void *sptr;
+
+            if ( status.rptr < status.wptr )
+                len = status.usage;
+            else
+                len = (fifo->gdepth - status.rptr);
+            len = min(len, lensw);
+			sptr = &((uint8_t*)fifo->buffer)[status.rptr];
+			cpu_dcache_invld_buf(sptr, len);
+            memcpy( ptr, sptr, len );
+            status.rptr += len;
+            if ( status.rptr == fifo->gdepth )
+                status.rptr = 0;
+            ptr += len;
+            status.usage -= len;
+            lensw -= len;
+			status.modified = 1;
+        }
+    }
+	writeback_status( fifo, &status );
+
+#ifdef CONFIG_MWMR_INSTRUMENTATION
+	cpu_dcache_invld_buf(fifo, sizeof(*fifo));
+	fifo->n_read += tot;
+	fifo->time_read += cpu_cycle_count()-access_begin;
+#endif
+
+	mwmr_unlock( fifo );
+}
+
+void mwmr_write( struct mwmr_s *fifo, const void *_ptr, size_t lensw )
+{
+	const uint8_t *ptr = _ptr;
+    local_mwmr_status_t status;
+
+#ifdef CONFIG_MWMR_INSTRUMENTATION
+	size_t tot = lensw/fifo->width;
+	uint32_t access_begin = cpu_cycle_count();
+#endif
+
+	mwmr_lock( fifo );
+	rehash_status( fifo, &status );
+    while ( lensw ) {
+        size_t len;
+        while (status.usage >= fifo->gdepth) {
+			writeback_status( fifo, &status );
+            mwmr_unlock( fifo );
+#if defined(CONFIG_SRL) && !defined(CONFIG_PTHREAD)
+			srl_sched_wait_le_le(&fifo->status->usage, fifo->gdepth-fifo->width);
+#elif defined(CONFIG_PTHREAD)
+			pthread_yield();
+#else
+			cpu_interrupt_disable();
+			sched_context_switch();
+			cpu_interrupt_enable();
+#endif
+            mwmr_lock( fifo );
+			rehash_status( fifo, &status );
+        }
+        while ( lensw && status.usage < fifo->gdepth ) {
+			void *dptr;
+
+            if ( status.rptr <= status.wptr )
+                len = (fifo->gdepth - status.wptr);
+            else
+                len = fifo->gdepth - status.usage;
+            len = min(len, lensw);
+			dptr = &((uint8_t*)fifo->buffer)[status.wptr];
+            memcpy( dptr, ptr, len );
+            status.wptr += len;
+            if ( status.wptr == fifo->gdepth )
+                status.wptr = 0;
+            ptr += len;
+            status.usage += len;
+            lensw -= len;
+			status.modified = 1;
+        }
+    }
+	writeback_status( fifo, &status );
+
+#ifdef CONFIG_MWMR_INSTRUMENTATION
+	cpu_dcache_invld_buf(fifo, sizeof(*fifo));
+	fifo->n_write += tot;
+	fifo->time_write += cpu_cycle_count()-access_begin;
+#endif
+
+	mwmr_unlock( fifo );
+}
+
+size_t mwmr_try_read( struct mwmr_s *fifo, void *_ptr, size_t lensw )
+{
+	uint8_t *ptr = _ptr;
+	size_t done = 0;
+    local_mwmr_status_t status;
+#ifdef CONFIG_MWMR_INSTRUMENTATION
+	uint32_t access_begin = cpu_cycle_count();
+#endif
+
+	if ( mwmr_try_lock( fifo ) )
+		return done;
+	rehash_status( fifo, &status );
+	while ( lensw && status.usage >= fifo->width ) {
+        size_t len;
+		void *sptr;
+
+		if ( status.rptr < status.wptr )
+			len = status.usage;
+		else
+			len = (fifo->gdepth - status.rptr);
+		len = min(len, lensw);
+		sptr = &((uint8_t*)fifo->buffer)[status.rptr];
+		cpu_dcache_invld_buf(sptr, len);
+		memcpy( ptr, sptr, len );
+		status.rptr += len;
+		if ( status.rptr == fifo->gdepth )
+			status.rptr = 0;
+		ptr += len;
+		status.usage -= len;
+		lensw -= len;
+		done += len;
+		status.modified = 1;
+	}
+	writeback_status( fifo, &status );
+	mwmr_unlock( fifo );
+#ifdef CONFIG_MWMR_INSTRUMENTATION
+	cpu_dcache_invld_buf(fifo, sizeof(*fifo));
+	fifo->n_read += done/fifo->width;
+	fifo->time_read += cpu_cycle_count()-access_begin;
+#endif
+	return done;
+}
+
+size_t mwmr_try_write( struct mwmr_s *fifo, const void *_ptr, size_t lensw )
+{
+	const uint8_t *ptr = _ptr;
+	size_t done = 0;
+    local_mwmr_status_t status;
+#ifdef CONFIG_MWMR_INSTRUMENTATION
+	uint32_t access_begin = cpu_cycle_count();
+#endif
+
+	if ( mwmr_try_lock( fifo ) )
+		return done;
+	rehash_status( fifo, &status );
+	while ( lensw && status.usage < fifo->gdepth ) {
+        size_t len;
+		void *dptr;
+
+		if ( status.rptr <= status.wptr )
+			len = (fifo->gdepth - status.wptr);
+		else
+			len = fifo->gdepth - status.usage;
+		len = min(len, lensw);
+		dptr = &((uint8_t*)fifo->buffer)[status.wptr];
+		memcpy( dptr, ptr, len );
+		status.wptr += len;
+		if ( status.wptr == fifo->gdepth )
+			status.wptr = 0;
+		ptr += len;
+		status.usage += len;
+		lensw -= len;
+		done += len;
+		status.modified = 1;
+    }
+	writeback_status( fifo, &status );
+#ifdef CONFIG_MWMR_INSTRUMENTATION
+	cpu_dcache_invld_buf(fifo, sizeof(*fifo));
+	fifo->n_write += done/fifo->width;
+	fifo->time_write += cpu_cycle_count()-access_begin;
+#endif
+	mwmr_unlock( fifo );
+	return done;
+}
+
+#ifdef CONFIG_MWMR_INSTRUMENTATION
+void mwmr_dump_stats( const struct mwmr_s *mwmr )
+{
+	cpu_dcache_invld_buf(mwmr, sizeof(*mwmr));
+	if ( mwmr->n_read )
+		srl_log_printf(NONE, "read,%s,%d,%d,%d\n",
+					   mwmr->name, cpu_cycle_count(),
+					   mwmr->time_read, mwmr->n_read );
+	if ( mwmr->n_write )
+		srl_log_printf(NONE, "write,%s,%d,%d,%d\n",
+					   mwmr->name, cpu_cycle_count(),
+					   mwmr->time_write, mwmr->n_write );
+}
+
+void mwmr_clear_stats( struct mwmr_s *mwmr )
+{
+	cpu_dcache_invld_buf(mwmr, sizeof(*mwmr));
+	mwmr->time_read =
+		mwmr->n_read =
+		mwmr->time_write =
+		mwmr->n_write = 0;
+}
+#endif
diff --git a/MPSoC/mutekh/examples/avatar/mwmr_soclib_lockfree.c b/MPSoC/mutekh/examples/avatar/mwmr_soclib_lockfree.c
new file mode 100644
index 0000000000000000000000000000000000000000..a50c5cb283190cf0583dc5d4f4c014091aa4e42c
--- /dev/null
+++ b/MPSoC/mutekh/examples/avatar/mwmr_soclib_lockfree.c
@@ -0,0 +1,322 @@
+/*
+ * This file is part of MutekH.
+ * 
+ * MutekH is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU Lesser General Public License as published
+ * by the Free Software Foundation; version 2.1 of the License.
+ * 
+ * MutekH is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ * 
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with MutekH; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301 USA
+ *
+ * Copyright (c) UPMC, Lip6, SoC
+ *         Nicolas Pouillon <nipo@ssji.net>, 2008
+ */
+
+#include <mutek/scheduler.h>
+#include <hexo/types.h>
+#include <hexo/atomic.h>
+#include <hexo/endian.h>
+#include <hexo/interrupt.h>
+#include <string.h>
+#include <mwmr/mwmr.h>
+#include <hexo/cpu.h>
+
+#if defined(CONFIG_SRL) && !defined(CONFIG_PTHREAD)
+# include <srl/srl_sched_wait.h>
+# include <srl/srl_log.h>
+# ifndef SRL_VERBOSITY
+#  define SRL_VERBOSITY VERB_DEBUG
+# endif
+#elif defined(CONFIG_PTHREAD)
+# include <pthread.h>
+#endif
+
+void
+mwmr_hw_init( void *coproc, enum SoclibMwmrWay way,
+			  size_t no, const struct mwmr_s* mwmr )
+{
+	uintptr_t c = (uintptr_t)coproc;
+	cpu_mem_write_32( c + sizeof(uint32_t) * MWMR_CONFIG_FIFO_WAY, endian_le32(way));
+	cpu_mem_write_32( c + sizeof(uint32_t) * MWMR_CONFIG_FIFO_NO, endian_le32(no));
+	cpu_mem_write_32( c + sizeof(uint32_t) * MWMR_CONFIG_STATUS_ADDR, endian_le32((uintptr_t)mwmr->status));
+	cpu_mem_write_32( c + sizeof(uint32_t) * MWMR_CONFIG_WIDTH, endian_le32(mwmr->width));
+	cpu_mem_write_32( c + sizeof(uint32_t) * MWMR_CONFIG_DEPTH, endian_le32(mwmr->gdepth));
+	cpu_mem_write_32( c + sizeof(uint32_t) * MWMR_CONFIG_BUFFER_ADDR, endian_le32((uintptr_t)mwmr->buffer));
+	cpu_mem_write_32( c + sizeof(uint32_t) * MWMR_CONFIG_RUNNING, endian_le32(1));
+	cpu_mem_write_32( c + sizeof(uint32_t) * MWMR_CONFIG_ENDIANNESS, 0x11223344);
+}
+
+void mwmr_config( void *coproc, size_t no, const uint32_t val )
+{
+	uintptr_t c = (uintptr_t)coproc;
+	cpu_mem_write_32( c + sizeof(uint32_t) * no, val);
+}
+
+uint32_t mwmr_status( void *coproc, size_t no )
+{
+	uintptr_t c = (uintptr_t)coproc;
+	return cpu_mem_read_32( c + sizeof(uint32_t) * no );
+}
+
+// returns subtracted value
+static inline uint32_t
+cpu_atomic_sub_minz(uint32_t *a, uint32_t _tosub)
+{
+	uint32_t oldval;
+	uint32_t tosub;
+
+	do {
+		cpu_dcache_invld(a);
+		oldval = *a;
+		tosub = _tosub;
+		if ( tosub > oldval )
+			tosub = oldval;
+
+		if ( tosub == 0 )
+			return 0;
+	} while ( ! atomic_compare_and_swap((atomic_t*)a, oldval, oldval-tosub) );
+
+	return tosub;
+}
+
+// returns previous value
+static inline uint32_t
+cpu_atomic_add(uint32_t *a, uint32_t val)
+{
+	uint32_t oldval;
+
+	do {
+		cpu_dcache_invld(a);
+		oldval = *a;
+	} while ( ! atomic_compare_and_swap((atomic_t*)a, oldval, oldval+val) );
+
+	return oldval;
+}
+
+// returns previous value
+static inline uint32_t
+cpu_atomic_add_wrap(uint32_t *a, uint32_t val, uint32_t mod)
+{
+	uint32_t oldval, newval;
+
+	do {
+		cpu_dcache_invld(a);
+		oldval = *a;
+		newval = (oldval+val);
+		if ( newval >= mod )
+			newval -= mod;
+	} while ( ! atomic_compare_and_swap((atomic_t*)a, oldval, newval ) );
+
+	return oldval;
+}
+
+static inline void
+cpu_atomic_wait_and_swap(uint32_t *a, uint32_t old, uint32_t new)
+{
+	do {
+	} while ( ! atomic_compare_and_swap((atomic_t*)a, old, new ) );
+}
+
+size_t mwmr_read_unit(
+	struct mwmr_status_s *status,
+	const uint8_t *mwmr_buffer,
+	const size_t gdepth,
+	uint8_t *user_buffer,
+	size_t user_len)
+{
+#if MWMR_DEBUG
+	printk("mwmr_read_unit(status = %p, mwmr_buffer = %p, gdepth = %d, user_buf = %p, user_len = %d)\n",
+		   status, mwmr_buffer, (int)gdepth, user_buffer, (int)user_len);
+#endif
+
+	assert(mwmr_buffer);
+	assert(user_buffer);
+	if ( user_len > gdepth )
+		user_len = gdepth;
+
+	uint32_t xfer_size = cpu_atomic_sub_minz(&status->data_size, user_len);
+	if ( xfer_size == 0 )
+		return 0;
+
+	assert( xfer_size <= gdepth );
+	assert( xfer_size <= user_len );
+
+	uint32_t rptr = cpu_atomic_add_wrap(&status->data_tail, xfer_size, gdepth);
+	uint32_t future_rptr;
+
+	assert( rptr < gdepth );
+
+	if ( rptr + xfer_size > gdepth ) {
+		size_t tx1 = gdepth - rptr;
+		size_t tx2 = xfer_size - tx1;
+
+		cpu_dcache_invld_buf((char*)mwmr_buffer + rptr, tx1);
+		cpu_dcache_invld_buf((char*)mwmr_buffer, tx2);
+		memcpy( user_buffer, mwmr_buffer + rptr, tx1 );
+		memcpy( user_buffer + tx1, mwmr_buffer, tx2 );
+		future_rptr = tx2;
+	} else {
+		cpu_dcache_invld_buf((char*)mwmr_buffer + rptr, xfer_size);
+		memcpy( user_buffer, mwmr_buffer + rptr, xfer_size );
+		future_rptr = rptr + xfer_size;
+		if ( future_rptr == gdepth )
+			future_rptr = 0;
+	}
+	assert( future_rptr < gdepth );
+	cpu_atomic_wait_and_swap(&status->free_head, rptr, future_rptr);
+	cpu_atomic_add(&status->free_size, xfer_size);
+	return xfer_size;
+}
+
+size_t mwmr_write_unit(
+	struct mwmr_status_s *status,
+	uint8_t *mwmr_buffer,
+	const size_t gdepth,
+	const uint8_t *user_buffer,
+	size_t user_len)
+{
+#if MWMR_DEBUG
+	printk("mwmr_write_unit(status = %p, mwmr_buffer = %p, gdepth = %d, user_buf = %p, user_len = %d)\n",
+		   status, mwmr_buffer, (int)gdepth, user_buffer, (int)user_len);
+#endif
+
+	assert(mwmr_buffer);
+	assert(user_buffer);
+	if ( user_len > gdepth )
+		user_len = gdepth;
+
+	uint32_t xfer_size = cpu_atomic_sub_minz(&status->free_size, user_len);
+	if ( xfer_size == 0 )
+		return 0;
+
+	assert( xfer_size <= gdepth );
+	assert( xfer_size <= user_len );
+
+	uint32_t wptr = cpu_atomic_add_wrap(&status->free_tail, xfer_size, gdepth);
+	uint32_t future_wptr;
+
+	assert( wptr < gdepth );
+
+	if ( wptr + xfer_size > gdepth ) {
+		size_t tx1 = gdepth - wptr;
+		size_t tx2 = xfer_size - tx1;
+
+		memcpy( mwmr_buffer + wptr, user_buffer, tx1 );
+		memcpy( mwmr_buffer, user_buffer + tx1, tx2 );
+		future_wptr = tx2;
+	} else {
+		memcpy( mwmr_buffer + wptr, user_buffer, xfer_size );
+		future_wptr = wptr + xfer_size;
+		if ( future_wptr == gdepth )
+			future_wptr = 0;
+	}
+	assert( future_wptr < gdepth );
+	cpu_atomic_wait_and_swap(&status->data_head, wptr, future_wptr);
+	cpu_atomic_add(&status->data_size, xfer_size);
+	return xfer_size;
+}
+
+void mwmr_read( struct mwmr_s *fifo, void *_ptr, size_t lensw )
+{
+#if MWMR_DEBUG
+	printk("mwmr_read(fifo = %p, status = %p, user_buf = %p, user_len = %d)\n",
+		   fifo, fifo->status, _ptr, (int)lensw);
+#endif
+	size_t done = 0;
+	uint8_t *ptr = _ptr;
+	const uint8_t *buffer = fifo->buffer;
+	const size_t gdepth = fifo->gdepth;
+
+	while ( done < lensw ) {
+		size_t xfer_size = mwmr_read_unit(
+			fifo->status, buffer, gdepth,
+			ptr+done, lensw-done);
+
+		if ( xfer_size == 0 ) {
+#if defined(CONFIG_SRL) && !defined(CONFIG_PTHREAD)
+			srl_sched_wait_ne_cpu(&fifo->status->data_size, 0);
+#elif defined(CONFIG_PTHREAD)
+			pthread_yield();
+#else
+			cpu_interrupt_disable();
+			sched_context_switch();
+			cpu_interrupt_enable();
+#endif
+			continue;
+		}
+
+		ptr += xfer_size;
+		done += xfer_size;
+	}
+}
+
+void mwmr_write( struct mwmr_s *fifo, const void *_ptr, size_t lensw )
+{
+#if MWMR_DEBUG
+	printk("mwmr_write(fifo = %p, status = %p, user_buf = %p, user_len = %d)\n",
+		   fifo, fifo->status, _ptr, (int)lensw);
+#endif
+	size_t done = 0;
+	const uint8_t *ptr = _ptr;
+	uint8_t *buffer = fifo->buffer;
+	const size_t gdepth = fifo->gdepth;
+
+	while ( done < lensw ) {
+		size_t xfer_size = mwmr_write_unit(
+			fifo->status, buffer, gdepth,
+			ptr+done, lensw-done);
+
+		if ( xfer_size == 0 ) {
+#if defined(CONFIG_SRL) && !defined(CONFIG_PTHREAD)
+			srl_sched_wait_ne_cpu(&fifo->status->free_size, 0);
+#elif defined(CONFIG_PTHREAD)
+			pthread_yield();
+#else
+			cpu_interrupt_disable();
+			sched_context_switch();
+			cpu_interrupt_enable();
+#endif
+			continue;
+		}
+
+		ptr += xfer_size;
+		done += xfer_size;
+	}
+}
+
+size_t mwmr_try_read( struct mwmr_s *fifo, void *_ptr, size_t lensw )
+{
+#if MWMR_DEBUG
+	printk("mwmr_try_read(fifo = %p, status = %p, user_buf = %p, user_len = %d)\n",
+		   fifo, fifo->status, _ptr, (int)lensw);
+#endif
+	uint8_t *ptr = _ptr;
+	const uint8_t *buffer = fifo->buffer;
+	const size_t gdepth = fifo->gdepth;
+
+	return mwmr_read_unit(
+		fifo->status, buffer, gdepth,
+		ptr, lensw);
+}
+
+size_t mwmr_try_write( struct mwmr_s *fifo, const void *_ptr, size_t lensw )
+{
+#if MWMR_DEBUG
+	printk("mwmr_write_write(fifo = %p, status = %p, user_buf = %p, user_len = %d)\n",
+		   fifo, fifo->status, _ptr, (int)lensw);
+#endif
+	const uint8_t *ptr = _ptr;
+	uint8_t *buffer = fifo->buffer;
+	const size_t gdepth = fifo->gdepth;
+
+	return mwmr_write_unit(
+		fifo->status, buffer, gdepth,
+		ptr, lensw);
+}
diff --git a/MPSoC/src/asyncchannel.c b/MPSoC/src/asyncchannel.c
index dc955930733727e3605c1e5f05aade019db2d05e..802eb9f9ebab96324427ba69f40b07ddcda9542a 100755
--- a/MPSoC/src/asyncchannel.c
+++ b/MPSoC/src/asyncchannel.c
@@ -33,7 +33,14 @@ int async_write_nonblocking( struct mwmr_s *fifo, void *_ptr, int lensw ){
 
 
 void async_read( struct mwmr_s *fifo, void *_ptr, int lensw ){
-    mwmr_read(fifo,_ptr,lensw);
+  debugInt("debug fifo \n",fifo);
+  debugInt("debug ptr \n",_ptr);
+  debugInt("debug  lensw \n", lensw);
+  debugInt("debug  fifo status address \n", &(fifo->status));
+  debugInt("debug  fifo status \n", fifo->status);
+  debugInt("debug  fifo lock address\n", &(fifo->status->lock));
+  debugInt("debug  fifo lock \n", fifo->status->lock);
+  mwmr_read(fifo,_ptr,lensw);
 }
 
 void async_write( struct mwmr_s *fifo, void *_ptr, int lensw ){
@@ -60,9 +67,9 @@ asyncchannel *getNewAsyncchannel(char *outname, char *inname, int isBlocking, in
   asyncch->mwmr_fifo=fifo;
   asyncch->mwmr_fifo->depth=fifo->depth;
   asyncch->mwmr_fifo->width=fifo->width;
-  debugInt("asyncchannel getNew \n",asyncch->mwmr_fifo);
-  debugInt("asyncchannel \n",asyncch->mwmr_fifo->depth);
-  debugInt("asyncchannel \n",asyncch->mwmr_fifo->width);
+  debugInt("asyncchannel address \n",asyncch->mwmr_fifo);
+  debugInt("asyncchannel depth \n",asyncch->mwmr_fifo->depth);
+  debugInt("asyncchannel width \n",asyncch->mwmr_fifo->width);
 
   return asyncch;
 }
diff --git a/MPSoC/src/request_manager.c b/MPSoC/src/request_manager.c
index 7988a3f862d412e264cb6f5089d2c932130d767e..9024ecd9dd385d359a55b966dfccc2c2dac71b60 100755
--- a/MPSoC/src/request_manager.c
+++ b/MPSoC/src/request_manager.c
@@ -23,8 +23,7 @@ void executeSendSyncTransaction(request *req) {
 
   cpt = 0;
   request* currentReq = req->syncChannel->inWaitQueue;
-  //debugMsg("*****Execute send sync transaction");
-
+ 
   while(currentReq != NULL) {
     cpt ++;
     currentReq = currentReq->next;
@@ -40,6 +39,8 @@ void executeSendSyncTransaction(request *req) {
   } 
 
   // Remove all related request from list requests
+  //DG 10.02. einkommentiert
+
   //req->syncChannel->inWaitQueue = removeRequestFromList(req->syncChannel->inWaitQueue, selectedReq);
   debugMsg("Setting related request");
   req->relatedRequest = selectedReq;
@@ -52,18 +53,16 @@ void executeSendSyncTransaction(request *req) {
   copyParameters(req, selectedReq);
 
   debugInt("syncchannel address \n", req->syncChannel->mwmr_fifo);
-  debugInt("***syncchannel nbOfParams \n", req->nbOfParams);
-  //DG 7.2. req->params
-  //sync_write(req->syncChannel->mwmr_fifo, selectedReq->ID, 1 );// transmit ID
-  //sync_write(req->syncChannel->mwmr_fifo, selectedReq->ID,  req->params );
-  sync_write(req->syncChannel->mwmr_fifo, req->params,  req->nbOfParams*sizeof(req->params));
+  debugInt("syncchannel nbOfParams \n", req->nbOfParams);
+  debugInt("syncchannel burst \n", req->nbOfParams*sizeof(req->params));
+   debugInt("syncchannel params \n", req->params[0]);
+  sync_write(req->syncChannel->mwmr_fifo, &(req->params),  req->nbOfParams*sizeof(req->params));
 
-  sync_read(req->syncChannel->mwmr_fifo, req->params,  req->nbOfParams*sizeof(req->params));
+  sync_read(req->syncChannel->mwmr_fifo, &(req->params),  req->nbOfParams*sizeof(req->params));
 
   //DG 8.2. a-t-on besoin de ID?
-  debugInt("***** req->params", req->params);
+  debugInt("req->params", req->params);
   debugMsg("\n");
-// debugMsg("after sync write\n");
  
   debugMsg("Signaling");
   
@@ -75,15 +74,10 @@ void executeSendSyncTransaction(request *req) {
 void executeReceiveSyncTransaction(request *req) {
   int cpt;
   request *selectedReq;
-  //debugMsg("*****Execute receive sync transaction");
-  // At least one transaction available -> must select one randomly
-  // First: count how many of them are available
-  // Then, select one
-  // Broadcast the new condition! DG ce n'est pas un broadcast sur canal
-
+ 
   request* currentReq = req->syncChannel->outWaitQueue;
   cpt = 0;
-  debugMsg("*****Execute receive sync tr");
+  debugMsg("Execute receive sync tr");
 
   while(currentReq != NULL) {
     cpt ++;
@@ -96,6 +90,7 @@ void executeReceiveSyncTransaction(request *req) {
     selectedReq = selectedReq->next;
     cpt --;
   } 
+  //DG 10.02. einkommentiert
 
   //req->syncChannel->outWaitQueue = removeRequestFromList(req->syncChannel->outWaitQueue, selectedReq);
   debugMsg("Setting related request");
@@ -112,17 +107,16 @@ void executeReceiveSyncTransaction(request *req) {
   pthread_cond_signal(selectedReq->listOfRequests->wakeupCondition);
 
   debugInt("syncchannel read: address \n",selectedReq->syncChannel->mwmr_fifo);  
-debugInt("syncchannel read: nbOfParams \n",selectedReq->nbOfParams);  
-  //sync_read(selectedReq->syncChannel->mwmr_fifo, selectedReq->ID, 1);
-  //transmit ID
-  //DG 8.2. params
-sync_write(selectedReq->syncChannel->mwmr_fifo, selectedReq->params,  selectedReq->nbOfParams*sizeof(selectedReq->params) );
-
-sync_read(selectedReq->syncChannel->mwmr_fifo, selectedReq->params,  selectedReq->nbOfParams*sizeof(selectedReq->params) );
- //DG 7.2. params
-  //sync_read(selectedReq->syncChannel->mwmr_fifo, selectedReq->ID, &req->params);
+  debugInt("syncchannel read: nbOfParams \n",selectedReq->nbOfParams);  
+  debugInt("syncchannel burst \n", req->nbOfParams*sizeof(req->params));
+  debugInt("syncchannel params \n", req->params[0]);
+  
+  sync_write(selectedReq->syncChannel->mwmr_fifo, &(selectedReq->params),  selectedReq->nbOfParams*sizeof(selectedReq->params) );
+
+  sync_read(selectedReq->syncChannel->mwmr_fifo, &(selectedReq->params),  selectedReq->nbOfParams*sizeof(selectedReq->params) );
+ 
   debugMsg("after syncchannel read");
-  debugInt("***** req->params \n", req->params);
+  debugInt("req->params \n", req->params);
   traceSynchroRequest(selectedReq, req);
 }
 
@@ -420,19 +414,16 @@ void private__makeRequestPending(setOfRequests *list) {
       }
       
     }
-
     req = req->nextRequestInList;
   }
 }
 
 void private__makeRequest(request *req) {
   if (req->type == SEND_SYNC_REQUEST) {
-  debugMsg("@@@@@@@@@@@@@@@@@@@@@@@@@@@send");
     executeSendSyncTransaction(req);
   }
 
   if (req->type == RECEIVE_SYNC_REQUEST) {
-debugMsg("##########################receive");
     executeReceiveSyncTransaction(req);
   }
 
diff --git a/MPSoC/src/request_manager.c-orig b/MPSoC/src/request_manager.c-orig
new file mode 100755
index 0000000000000000000000000000000000000000..7988a3f862d412e264cb6f5089d2c932130d767e
--- /dev/null
+++ b/MPSoC/src/request_manager.c-orig
@@ -0,0 +1,606 @@
+#include <stdlib.h>
+#include <pthread.h>
+#include <time.h>
+
+#include "request_manager.h"
+#include "request.h"
+#include "myerrors.h"
+#include "debug.h"
+#include "mytimelib.h"
+#include "random.h"
+#include "asyncchannel.h"
+#include "syncchannel.h"
+#include "tracemanager.h"
+
+void executeSendSyncTransaction(request *req) {
+  int cpt;
+  request *selectedReq;
+
+  // At least one transaction available -> must select one randomly
+  // First: count how many of them are available
+  // Then, select one
+  // Broadcast the new condition!
+
+  cpt = 0;
+  request* currentReq = req->syncChannel->inWaitQueue;
+  //debugMsg("*****Execute send sync transaction");
+
+  while(currentReq != NULL) {
+    cpt ++;
+    currentReq = currentReq->next;
+  }
+
+  cpt = random() % cpt;
+
+  // Head of the list?
+  selectedReq = req->syncChannel->inWaitQueue;
+  while (cpt > 0) {
+    selectedReq = selectedReq->next;
+    cpt --;
+  } 
+
+  // Remove all related request from list requests
+  //req->syncChannel->inWaitQueue = removeRequestFromList(req->syncChannel->inWaitQueue, selectedReq);
+  debugMsg("Setting related request");
+  req->relatedRequest = selectedReq;
+
+  // Select the selected request, and notify the information
+  selectedReq->selected = 1;
+  selectedReq->listOfRequests->selectedRequest = selectedReq;
+
+  // Handle parameters
+  copyParameters(req, selectedReq);
+
+  debugInt("syncchannel address \n", req->syncChannel->mwmr_fifo);
+  debugInt("***syncchannel nbOfParams \n", req->nbOfParams);
+  //DG 7.2. req->params
+  //sync_write(req->syncChannel->mwmr_fifo, selectedReq->ID, 1 );// transmit ID
+  //sync_write(req->syncChannel->mwmr_fifo, selectedReq->ID,  req->params );
+  sync_write(req->syncChannel->mwmr_fifo, req->params,  req->nbOfParams*sizeof(req->params));
+
+  sync_read(req->syncChannel->mwmr_fifo, req->params,  req->nbOfParams*sizeof(req->params));
+
+  //DG 8.2. a-t-on besoin de ID?
+  debugInt("***** req->params", req->params);
+  debugMsg("\n");
+// debugMsg("after sync write\n");
+ 
+  debugMsg("Signaling");
+  
+  pthread_cond_signal(selectedReq->listOfRequests->wakeupCondition);
+
+  traceSynchroRequest(req, selectedReq);
+}
+
+void executeReceiveSyncTransaction(request *req) {
+  int cpt;
+  request *selectedReq;
+  //debugMsg("*****Execute receive sync transaction");
+  // At least one transaction available -> must select one randomly
+  // First: count how many of them are available
+  // Then, select one
+  // Broadcast the new condition! DG ce n'est pas un broadcast sur canal
+
+  request* currentReq = req->syncChannel->outWaitQueue;
+  cpt = 0;
+  debugMsg("*****Execute receive sync tr");
+
+  while(currentReq != NULL) {
+    cpt ++;
+    //debugInt("cpt", cpt);
+    currentReq = currentReq->next;
+  }
+  cpt = random() % cpt;
+  selectedReq = req->syncChannel->outWaitQueue;
+  while (cpt > 0) {
+    selectedReq = selectedReq->next;
+    cpt --;
+  } 
+
+  //req->syncChannel->outWaitQueue = removeRequestFromList(req->syncChannel->outWaitQueue, selectedReq);
+  debugMsg("Setting related request");
+  req->relatedRequest = selectedReq;
+
+  // Select the request, and notify the information in the channel
+  selectedReq->selected = 1;
+  selectedReq->listOfRequests->selectedRequest = selectedReq;
+
+  // Handle parameters
+  copyParameters(selectedReq, req);
+
+  debugMsg("Signaling");
+  pthread_cond_signal(selectedReq->listOfRequests->wakeupCondition);
+
+  debugInt("syncchannel read: address \n",selectedReq->syncChannel->mwmr_fifo);  
+debugInt("syncchannel read: nbOfParams \n",selectedReq->nbOfParams);  
+  //sync_read(selectedReq->syncChannel->mwmr_fifo, selectedReq->ID, 1);
+  //transmit ID
+  //DG 8.2. params
+sync_write(selectedReq->syncChannel->mwmr_fifo, selectedReq->params,  selectedReq->nbOfParams*sizeof(selectedReq->params) );
+
+sync_read(selectedReq->syncChannel->mwmr_fifo, selectedReq->params,  selectedReq->nbOfParams*sizeof(selectedReq->params) );
+ //DG 7.2. params
+  //sync_read(selectedReq->syncChannel->mwmr_fifo, selectedReq->ID, &req->params);
+  debugMsg("after syncchannel read");
+  debugInt("***** req->params \n", req->params);
+  traceSynchroRequest(selectedReq, req);
+}
+
+
+void executeSendAsyncTransaction(request *req) {
+  request *selectedReq;
+
+  // Full FIFO?
+  if (req->asyncChannel->currentNbOfMessages == req->asyncChannel->maxNbOfMessages) {
+    // Must remove the oldest  message
+    getAndRemoveOldestMessageFromAsyncChannel(req->asyncChannel);
+  }
+
+  addMessageToAsyncChannel(req->asyncChannel, req->msg);
+  
+  debugMsg("Signaling async write to all requests waiting ");
+  selectedReq = req->asyncChannel->inWaitQueue;
+  while (selectedReq != NULL) {
+    pthread_cond_signal(selectedReq->listOfRequests->wakeupCondition);
+    selectedReq = selectedReq->next;
+  }
+  debugMsg("Signaling done");
+
+  traceAsynchronousSendRequest(req);
+}
+
+void executeReceiveAsyncTransaction(request *req) {
+  int i;
+  request *selectedReq;
+
+  req->msg = getAndRemoveOldestMessageFromAsyncChannel(req->asyncChannel);
+    
+  selectedReq = req->asyncChannel->outWaitQueue;
+
+  // Must recopy parameters
+  for(i=0; i<req->nbOfParams; i++) {
+    *(req->params[i]) = req->msg->params[i];
+  }
+
+  traceAsynchronousReceiveRequest(req);
+
+  // unallocate message
+  destroyMessageWithParams(req->msg);
+
+  debugMsg("Signaling async read to all requests waiting ");
+  while (selectedReq != NULL) {
+    pthread_cond_signal(selectedReq->listOfRequests->wakeupCondition);
+    selectedReq = selectedReq->next;
+  }
+  debugMsg("Signaling done");
+}
+
+
+void executeSendBroadcastTransaction(request *req) {
+  int cpt;
+  request *tmpreq;
+
+  // At least one transaction available -> must select all of them
+  // but at most one per task
+  // Then, broadcast the new condition!
+
+  request* currentReq = req->syncChannel->inWaitQueue;
+  request* currentLastReq = req;
+  debugMsg("Execute broadcast sync tr");
+
+  
+  while(currentReq != NULL) {
+    tmpreq = hasIdenticalRequestInListOfSelectedRequests(currentReq, req->relatedRequest);
+    if (tmpreq != NULL) {
+      // Must select one of the two
+      // If =1, replace, otherwise, just do nothing
+      cpt = random() % 2;
+      if (cpt == 1) {
+	debugMsg("Replacing broadcast request");
+	req->relatedRequest = replaceInListOfSelectedRequests(tmpreq, currentReq, req->relatedRequest);
+	currentReq->listOfRequests->selectedRequest = currentReq;
+	copyParameters(req, currentReq);
+	currentReq->selected = 1;
+	currentLastReq = req;
+	while(currentLastReq->relatedRequest != NULL) {
+	  currentLastReq = currentLastReq->relatedRequest;
+	}
+      }
+    } else {
+      currentLastReq->relatedRequest = currentReq;
+      currentReq->relatedRequest = NULL;
+      currentReq->selected = 1;
+      currentReq->listOfRequests->selectedRequest = currentReq;
+      copyParameters(req, currentReq);
+      currentLastReq = currentReq;
+    }
+
+    currentReq = currentReq->next;
+    
+    debugInt("Nb of requests selected:", nbOfRelatedRequests(req));
+  }
+
+
+  debugMsg("Signaling");
+  currentReq = req->relatedRequest;
+  cpt = 0;
+  while(currentReq != NULL) {
+    cpt ++;
+    pthread_cond_signal(currentReq->listOfRequests->wakeupCondition);
+    traceSynchroRequest(req, currentReq);
+    currentReq = currentReq->relatedRequest;
+  }
+
+  debugInt("NUMBER of broadcast Requests", cpt);
+}
+
+
+int executable(setOfRequests *list, int nb) {
+  int cpt = 0;
+  //int index = 0;
+  request *req = list->head;
+  timespec ts;
+  int tsDone = 0;
+
+  debugMsg("Starting loop");
+
+  list->hasATimeRequest = 0;
+
+  while(req != NULL) {
+    if (!(req->delayElapsed)) {
+      if (req->hasDelay) {
+	// Is the delay elapsed???
+	debugTime("begin time of list of request", &list->startTime);
+	debugTime("start time of this request", &req->myStartTime);
+	if (tsDone == 0) {
+	  my_clock_gettime(&ts);
+	  debugTime("Current time", &ts);
+	  tsDone = 1;
+	}
+
+	if (isBefore(&ts, &(req->myStartTime)) == 1) {
+	  // Delay not elapsed
+	  debugMsg("---------t--------> delay NOT elapsed");
+	  if (list->hasATimeRequest == 0) {
+	    list->hasATimeRequest = 1;
+	    list->minTimeToWait.tv_nsec = req->myStartTime.tv_nsec;
+	    list->minTimeToWait.tv_sec = req->myStartTime.tv_sec;
+	  } else {
+	    minTime(&(req->myStartTime), &(list->minTimeToWait),&(list->minTimeToWait));
+	  }
+	}  else {
+	  // Delay elapsed
+	  debugMsg("---------t--------> delay elapsed");
+	  req->delayElapsed = 1;
+	}
+      } else {
+	req->delayElapsed = 1;
+      }
+    }
+    req = req->nextRequestInList;
+  }
+  
+  req = list->head;
+  while((req != NULL) && (cpt < nb)) {
+    req->executable = 0;
+    if (req->delayElapsed) {
+      if (req->type == SEND_SYNC_REQUEST) {
+	//DG 8.2. ici le probleme! wait queue empty pour B0 :(
+	debugMsg("Send sync");
+	debugInt("req->syncChannel->inWaitQueue ",req->syncChannel->inWaitQueue);
+	if (req->syncChannel->inWaitQueue != NULL) {// DG 8.2. non c'est correct: il faut un rendez-vous synchrone entre inqueue et outqueue
+        //if (req->syncChannel->outWaitQueue != NULL) {//DG 8.2.??
+	  debugMsg("Send sync executable");
+	  req->executable = 1;
+	  cpt ++;
+	  }  else {
+	  debugMsg("Send sync not executable");
+	  }
+	  ////index ++;
+      }
+
+      if (req->type == RECEIVE_SYNC_REQUEST) {
+	debugMsg("receive sync");
+	if (req->syncChannel->outWaitQueue != NULL) {// DG 8.2. non c'est correct: il faut un rendez-vous synchrone entre inqueue et outqueue
+        //if (req->syncChannel->inWaitQueue != NULL) {//DG 8.2.??
+	  req->executable = 1;
+	  debugMsg("Receive sync executable");
+	  cpt ++;
+	}
+ else {
+	  debugMsg("Receive sync not executable");
+	  }
+	//index ++;
+      }
+
+      if (req->type == SEND_ASYNC_REQUEST) {
+	debugMsg("Send async");
+
+	if (!(req->asyncChannel->isBlocking)) {
+	  // Can always add a message -> executable
+	  debugMsg("Send async executable since non blocking");
+	  req->executable = 1;
+	  cpt ++;
+
+	  //blocking case ... channel full?
+	} else {
+	  if (req->asyncChannel->currentNbOfMessages < req->asyncChannel->maxNbOfMessages) {
+	    // Not full!
+	    debugMsg("Send async executable since channel not full");
+	    req->executable = 1;
+	    cpt ++;
+	  } else {
+	    debugMsg("Send async not executable: full, and channel is blocking");
+	  }
+	}
+      }
+
+      if (req->type == RECEIVE_ASYNC_REQUEST) {
+	debugMsg("receive async");
+	if (req->asyncChannel->currentNbOfMessages >0) {
+	  debugMsg("Receive async executable: not empty");
+	  req->executable = 1;
+	  cpt ++;
+	} else {
+	  debugMsg("Receive async not executable: empty");
+	}
+	//index ++;
+      }
+      
+
+      if (req->type == SEND_BROADCAST_REQUEST) {
+	debugMsg("send broadcast");
+	req->executable = 1;
+	cpt ++;
+      }
+
+      if (req->type == RECEIVE_BROADCAST_REQUEST) {
+	debugMsg("receive broadcast");
+	// A receive broadcast is never executable
+	req->executable = 0;
+	//index ++;
+      }
+
+      
+      
+
+      if (req->type == IMMEDIATE) {
+	debugMsg("immediate");
+	req->executable = 1;
+	cpt ++;
+      }
+    }
+
+    req = req->nextRequestInList;
+    
+  }
+
+  return cpt;
+}
+
+void private__makeRequestPending(setOfRequests *list) {
+  request *req = list->head;
+  while(req != NULL) {
+    if ((req->delayElapsed) && (!(req->alreadyPending))) {
+      if (req->type == SEND_SYNC_REQUEST) {
+	debug2Msg(list->owner,"Adding pending sync request in outWaitqueue");
+	req->syncChannel->outWaitQueue = addToRequestQueue(req->syncChannel->outWaitQueue, req);
+
+	req->alreadyPending = 1;
+      }
+
+      if (req->type ==  RECEIVE_SYNC_REQUEST) {
+	debugMsg("Adding pending request in inWaitqueue");
+	req->alreadyPending = 1;
+	req->syncChannel->inWaitQueue = addToRequestQueue(req->syncChannel->inWaitQueue, req);
+      }
+
+      if (req->type == SEND_ASYNC_REQUEST) {
+	debugMsg("Adding pending async request in outWaitqueue");
+	req->asyncChannel->outWaitQueue = addToRequestQueue(req->asyncChannel->outWaitQueue, req);
+	req->alreadyPending = 1;
+      }
+
+      if (req->type ==  RECEIVE_ASYNC_REQUEST) {
+	debugMsg("Adding pending request in inWaitqueue");
+	req->alreadyPending = 1;
+	req->asyncChannel->inWaitQueue = addToRequestQueue(req->asyncChannel->inWaitQueue, req);
+      }
+
+      if (req->type ==  RECEIVE_BROADCAST_REQUEST) {
+	debugMsg("Adding pending broadcast request in inWaitqueue");
+	req->alreadyPending = 1;
+	req->syncChannel->inWaitQueue = addToRequestQueue(req->syncChannel->inWaitQueue, req);
+      }
+
+      if (req->type ==  SEND_BROADCAST_REQUEST) {
+	debugMsg("Adding pending broadcast request in outWaitqueue");
+	req->alreadyPending = 1;
+	req->syncChannel->outWaitQueue = addToRequestQueue(req->syncChannel->outWaitQueue, req);
+      }
+      
+    }
+
+    req = req->nextRequestInList;
+  }
+}
+
+void private__makeRequest(request *req) {
+  if (req->type == SEND_SYNC_REQUEST) {
+  debugMsg("@@@@@@@@@@@@@@@@@@@@@@@@@@@send");
+    executeSendSyncTransaction(req);
+  }
+
+  if (req->type == RECEIVE_SYNC_REQUEST) {
+debugMsg("##########################receive");
+    executeReceiveSyncTransaction(req);
+  }
+
+  if (req->type == SEND_ASYNC_REQUEST) {
+    executeSendAsyncTransaction(req);
+  }
+
+  if (req->type == RECEIVE_ASYNC_REQUEST) {
+    executeReceiveAsyncTransaction(req);
+  }
+
+  if (req->type == SEND_BROADCAST_REQUEST) {
+    executeSendBroadcastTransaction(req);
+  }
+
+  // IMMEDIATE: Nothing to do
+  
+  // In all cases: remove other requests of the same list from their pending form
+  debugMsg("Removing original req");
+  removeAllPendingRequestsFromPendingLists(req, 1);
+  removeAllPendingRequestsFromPendingListsRelatedRequests(req);
+  /*if (req->relatedRequest != NULL) {
+    debugMsg("Removing related req");
+    removeAllPendingRequestsFromPendingLists(req->relatedRequest, 0);
+    }*/
+  
+}
+
+void removeAllPendingRequestsFromPendingListsRelatedRequests(request *req) {
+  if (req->relatedRequest != NULL) {
+    debugMsg("Removing related req");
+    removeAllPendingRequestsFromPendingLists(req->relatedRequest, 0);
+    // Recursive call
+    removeAllPendingRequestsFromPendingListsRelatedRequests(req->relatedRequest);
+  }
+}
+
+
+request *private__executeRequests0(setOfRequests *list, int nb) {
+  int howMany, found;
+  int selectedIndex, realIndex;
+  request *selectedReq;
+  request *req;
+  
+  // Compute which requests can be executed
+ 
+  howMany = executable(list, nb);
+ 
+  debugInt("Counting requests=", howMany);
+ 
+  if (howMany == 0) {
+    debugMsg("**No pending requests");
+    // Must make them pending
+    
+    private__makeRequestPending(list);
+
+    return NULL;
+  }
+  
+  debugInt("At least one pending request is executable", howMany);
+
+  
+  // Select a request
+  req = list->head;
+
+  selectedIndex = (rand() % howMany)+1;
+  debugInt("selectedIndex=", selectedIndex);
+  realIndex = 0;
+  found = 0;
+  while(req != NULL) {
+    if (req->executable == 1) {
+      found ++;
+      if (found == selectedIndex) {
+	break;
+      }
+    }
+    realIndex ++;
+    req = req->nextRequestInList;
+  }
+
+  debugInt("Getting request at index", realIndex);
+  selectedReq = getRequestAtIndex(list, realIndex);
+  selectedReq->selected = 1;
+  selectedReq->listOfRequests->selectedRequest = selectedReq;
+
+  debugInt("Selected request of type", selectedReq->type);
+
+  // Execute that request
+  private__makeRequest(selectedReq);
+
+  return selectedReq;  
+}
+
+
+request *private__executeRequests(setOfRequests *list) {
+  // Is a request already selected?
+
+  if (list->selectedRequest != NULL) {
+    return list->selectedRequest;
+  }
+
+  debug2Msg(list->owner,"No request selected -> looking for one!");
+
+  return private__executeRequests0(list, nbOfRequests(list));
+}
+
+request *executeOneRequest(setOfRequests *list, request *req) {
+  req->nextRequestInList = NULL;
+  req->listOfRequests = list;
+  list->head = req;
+  return executeListOfRequests(list);
+}
+
+
+void setLocalStartTime(setOfRequests *list) {
+  request *req = list->head;
+
+  while(req != NULL) {
+    if (req->hasDelay) {
+      req->delayElapsed = 0;
+      addTime(&(list->startTime), &(req->delay), &(req->myStartTime));
+      debug2Msg(list->owner, " -----t------>: Request with delay");
+    } else {
+      req->delayElapsed = 1;
+      req->myStartTime.tv_nsec = list->startTime.tv_nsec;
+      req->myStartTime.tv_sec = list->startTime.tv_sec;
+    }
+    req = req->nextRequestInList;
+  }
+}
+
+
+// Return the executed request
+request *executeListOfRequests(setOfRequests *list) {
+  request *req;
+ 
+  my_clock_gettime(&list->startTime);
+  list->selectedRequest = NULL;
+  setLocalStartTime(list);
+  
+  // Try to find a request that could be executed
+  debug2Msg(list->owner, "Locking mutex");
+  pthread_mutex_lock(list->mutex);
+  debug2Msg(list->owner, "Mutex locked");
+
+  debug2Msg(list->owner, "Going to execute request");
+
+  while((req = private__executeRequests(list)) == NULL) {
+    debug2Msg(list->owner, "Waiting for request!");
+    if (list->hasATimeRequest == 1) {
+      debug2Msg(list->owner, "Waiting for a request and at most for a given time");
+      debugTime("Min time to wait=", &(list->minTimeToWait));
+      pthread_cond_timedwait(list->wakeupCondition, list->mutex, &(list->minTimeToWait));
+    } else {
+      debug2Msg(list->owner, "Releasing mutex");
+     
+      pthread_cond_wait(list->wakeupCondition, list->mutex);
+    }
+    debug2Msg(list->owner, "Waking up for requests! -> getting mutex");
+  }
+
+  debug2Msg(list->owner, "Request selected!");
+
+  my_clock_gettime(&list->completionTime);
+ debug2Msg(list->owner, "Request selected0!");
+  pthread_mutex_unlock(list->mutex); 
+debug2Msg(list->owner, "Request selected1!");
+  debug2Msg(list->owner, "Mutex unlocked");
+  return req;
+}
+
diff --git a/build.txt b/build.txt
index 73f1028f8696e50d5bc35ca00ba1d7bafc454c41..ab68e80358bf1196d362568cc0e8971e08c875a9 100644
--- a/build.txt
+++ b/build.txt
@@ -1 +1 @@
-12127
\ No newline at end of file
+12128
\ No newline at end of file
diff --git a/src/ddtranslatorSoclib/toSoclib/TasksAndMainGenerator.java b/src/ddtranslatorSoclib/toSoclib/TasksAndMainGenerator.java
index 6c87f252f324f23a0eba8f7ef90aad768643c613..b3ff949903db0e975ac86e429dc56f384aa4a16d 100755
--- a/src/ddtranslatorSoclib/toSoclib/TasksAndMainGenerator.java
+++ b/src/ddtranslatorSoclib/toSoclib/TasksAndMainGenerator.java
@@ -240,7 +240,8 @@ public class TasksAndMainGenerator {
 	mainFile.appendToMainCode(getChannelName(ar, i) + "_status.lock = 0;" + CR2);
 	//DG 10.0.2 width=1??
 	mainFile.appendToMainCode(getChannelName(ar, i) + ".width = 1;" + CR);
-	mainFile.appendToMainCode(getChannelName(ar, i) + ".depth = 4;" + CR);//DG 10.02.2017 systematiquement des entiers pour le moment
+	//	mainFile.appendToMainCode(getChannelName(ar, i) + ".depth = 4;" + CR);//DG 10.02.2017 systematiquement des entiers pour le moment	
+mainFile.appendToMainCode(getChannelName(ar, i) + ".depth = 32;" + CR);//DG 14.02.
 	mainFile.appendToMainCode(getChannelName(ar, i) + ".gdepth = " +getChannelName(ar, i)+".depth;" + CR);
 	mainFile.appendToMainCode(getChannelName(ar, i) + ".buffer = "+getChannelName(ar, i)+"_data;" + CR);
 	mainFile.appendToMainCode(getChannelName(ar, i) + ".status = &"+getChannelName(ar, i)+"_status;" + CR2);
diff --git a/src/ui/DefaultText.java b/src/ui/DefaultText.java
index 7844130a570ecd235b30b2c48e0aee61b23135b0..25d805a485c33ec436e09b465069c1391315fe3f 100755
--- a/src/ui/DefaultText.java
+++ b/src/ui/DefaultText.java
@@ -49,8 +49,8 @@ package ui;
 
 public class DefaultText  {
 
-    public static String BUILD = "12126";
-    public static String DATE = "2017/02/14 02:01:21 CET";
+    public static String BUILD = "12127";
+    public static String DATE = "2017/02/15 02:01:04 CET";
     
     
     public static StringBuffer sbAbout = makeAbout();