Computes the reaction terms $f(u,v)$ and $g(u,v)$.
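The voltage reaction term is assembled incrementally: every ionic current evaluated in the listing below is subtracted from outarray[0], so the first output component accumulates $\frac{dV}{dt} = -\left(I_{Na} + I_{b,Na} + I_{K1} + I_{to} + I_{Kur} + I_{Kr} + I_{Ks} + I_{b,Ca} + I_{Ca,L} + I_{NaK} + I_{NaCa} + I_{p,Ca}\right)$, up to conductance and capacitance scalings that are not visible in this listing. The remaining outputs collect the concentration updates (outarray[16] through outarray[20]), while outarray[1] through outarray[15] receive the gating-variable steady states, with the matching time constants written to m_gates_tau.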
{
    ASSERTL0(inarray.get() != outarray.get(),
             "Must have different arrays for input and output.");

    const size_t n = m_nq; // number of solution points (declaration assumed; not visible in this listing)
    size_t i = 0;

    // Several outarray components are reused as temporary workspace before
    // the gating loops below overwrite them.
    Array<OneD, NekDouble> &tmp  = outarray[11];
    Array<OneD, NekDouble> &tmp2 = outarray[12];

    // Na+ reversal potential E_Na (evaluation not shown in this listing)
    Array<OneD, NekDouble> &tmp_E_na = outarray[14];
    // ...
    // Fast Na+ current I_Na
    Array<OneD, NekDouble> &tmp_I_Na = outarray[15];
    Vmath::Vsub(n, inarray[0], 1, tmp_E_na, 1, tmp_I_Na, 1);
    Vmath::Vmul(n, inarray[1], 1, tmp_I_Na, 1, tmp_I_Na, 1);
    Vmath::Vmul(n, inarray[1], 1, tmp_I_Na, 1, tmp_I_Na, 1);
    Vmath::Vmul(n, inarray[1], 1, tmp_I_Na, 1, tmp_I_Na, 1);
    Vmath::Vmul(n, inarray[2], 1, tmp_I_Na, 1, tmp_I_Na, 1);
    Vmath::Vmul(n, inarray[3], 1, tmp_I_Na, 1, tmp_I_Na, 1);
    // ...
    Vmath::Vsub(n, outarray[0], 1, tmp_I_Na, 1, outarray[0], 1);
    Vmath::Smul(n, -1.0, tmp_I_Na, 1, outarray[16], 1);

    // Background Na+ current I_b_Na
    Array<OneD, NekDouble> &tmp_I_b_Na = outarray[15];
    Vmath::Vsub(n, inarray[0], 1, tmp_E_na, 1, tmp_I_b_Na, 1);
    // ...
    Vmath::Vsub(n, outarray[0], 1, tmp_I_b_Na, 1, outarray[0], 1);
    Vmath::Vsub(n, outarray[16], 1, tmp_I_b_Na, 1, outarray[16], 1);

    // V - E_K (the K+ reversal potential is evaluated first; not shown)
    Array<OneD, NekDouble> &tmp_V_E_k = outarray[14];
    // ...
    Vmath::Vsub(n, inarray[0], 1, tmp_V_E_k, 1, tmp_V_E_k, 1);

    // Inward rectifier K+ current I_K1
    Array<OneD, NekDouble> &tmp_I_K1 = outarray[15];
    // ...
    Vmath::Vdiv(n, tmp_V_E_k, 1, tmp_I_K1, 1, tmp_I_K1, 1);
    // ...
    Vmath::Vsub(n, outarray[0], 1, tmp_I_K1, 1, outarray[0], 1);
    Vmath::Smul(n, -1.0, tmp_I_K1, 1, outarray[18], 1);

    // Transient outward K+ current I_to
    Array<OneD, NekDouble> &tmp_I_to = outarray[15];
    Vmath::Vmul(n, inarray[5], 1, tmp_V_E_k, 1, tmp_I_to, 1);
    Vmath::Vmul(n, inarray[4], 1, tmp_I_to, 1, tmp_I_to, 1);
    Vmath::Vmul(n, inarray[4], 1, tmp_I_to, 1, tmp_I_to, 1);
    Vmath::Vmul(n, inarray[4], 1, tmp_I_to, 1, tmp_I_to, 1);
    // ...
    Vmath::Vsub(n, outarray[0], 1, tmp_I_to, 1, outarray[0], 1);
    Vmath::Vsub(n, outarray[18], 1, tmp_I_to, 1, outarray[18], 1);

    // Ultrarapid delayed rectifier K+ current I_Kur
    Array<OneD, NekDouble> &tmp_I_kur = outarray[15];
    Vmath::Sadd(n, -15.0, inarray[0], 1, tmp_I_kur, 1);
    Vmath::Smul(n, -1.0 / 13.0, tmp_I_kur, 1, tmp_I_kur, 1);
    // ...
    Vmath::Vmul(n, tmp_V_E_k, 1, tmp_I_kur, 1, tmp_I_kur, 1);
    Vmath::Vmul(n, inarray[6], 1, tmp_I_kur, 1, tmp_I_kur, 1);
    Vmath::Vmul(n, inarray[6], 1, tmp_I_kur, 1, tmp_I_kur, 1);
    Vmath::Vmul(n, inarray[6], 1, tmp_I_kur, 1, tmp_I_kur, 1);
    Vmath::Vmul(n, inarray[7], 1, tmp_I_kur, 1, tmp_I_kur, 1);
    // ...
    Vmath::Vsub(n, outarray[0], 1, tmp_I_kur, 1, outarray[0], 1);
    Vmath::Vsub(n, outarray[18], 1, tmp_I_kur, 1, outarray[18], 1);

    // Rapid delayed rectifier K+ current I_Kr
    Array<OneD, NekDouble> &tmp_I_Kr = outarray[15];
    // ...
    Vmath::Smul(n, 1.0 / 22.4, tmp_I_Kr, 1, tmp_I_Kr, 1);
    // ...
    Vmath::Vdiv(n, tmp_V_E_k, 1, tmp_I_Kr, 1, tmp_I_Kr, 1);
    Vmath::Vmul(n, inarray[8], 1, tmp_I_Kr, 1, tmp_I_Kr, 1);
    // ...
    Vmath::Vsub(n, outarray[0], 1, tmp_I_Kr, 1, outarray[0], 1);
    Vmath::Vsub(n, outarray[18], 1, tmp_I_Kr, 1, outarray[18], 1);

    // Slow delayed rectifier K+ current I_Ks
    Array<OneD, NekDouble> &tmp_I_Ks = outarray[15];
    Vmath::Vmul(n, inarray[9], 1, tmp_V_E_k, 1, tmp_I_Ks, 1);
    Vmath::Vmul(n, inarray[9], 1, tmp_I_Ks, 1, tmp_I_Ks, 1);
    // ...
    Vmath::Vsub(n, outarray[0], 1, tmp_I_Ks, 1, outarray[0], 1);
    Vmath::Vsub(n, outarray[18], 1, tmp_I_Ks, 1, outarray[18], 1);

    // Background Ca2+ current I_b_Ca
    Array<OneD, NekDouble> &tmp_I_b_Ca = outarray[1];
    // ...
    Vmath::Vsub(n, inarray[0], 1, tmp_I_b_Ca, 1, tmp_I_b_Ca, 1);
    // ...
    Vmath::Vsub(n, outarray[0], 1, tmp_I_b_Ca, 1, outarray[0], 1);

    // L-type Ca2+ current I_Ca_L
    Array<OneD, NekDouble> &tmp_I_Ca_L = outarray[2];
    Vmath::Sadd(n, -65.0, inarray[0], 1, tmp_I_Ca_L, 1);
    Vmath::Vmul(n, inarray[10], 1, tmp_I_Ca_L, 1, tmp_I_Ca_L, 1);
    Vmath::Vmul(n, inarray[11], 1, tmp_I_Ca_L, 1, tmp_I_Ca_L, 1);
    Vmath::Vmul(n, inarray[12], 1, tmp_I_Ca_L, 1, tmp_I_Ca_L, 1);
    // ...
    Vmath::Vsub(n, outarray[0], 1, tmp_I_Ca_L, 1, outarray[0], 1);

    // Voltage-dependent factor f_NaK for the Na+/K+ pump current
    Array<OneD, NekDouble> &tmp_f_Na_k = outarray[14];
    // ...
    Vmath::Smul(n, 0.1245, tmp_f_Na_k, 1, tmp_f_Na_k, 1);
    Vmath::Vadd(n, tmp_f_Na_k, 1, tmp, 1, tmp_f_Na_k, 1);
    // ...

    // Na+/K+ pump current I_NaK (3 Na+ out, 2 K+ in per cycle)
    Array<OneD, NekDouble> &tmp_I_Na_K = outarray[15];
    // ...
    Vmath::Vmul(n, tmp_f_Na_k, 1, tmp_I_Na_K, 1, tmp_I_Na_K, 1);
    // ...
    Vmath::Vsub(n, outarray[0], 1, tmp_I_Na_K, 1, outarray[0], 1);
    Vmath::Svtvp(n, -3.0, tmp_I_Na_K, 1, outarray[16], 1, outarray[16], 1);
    Vmath::Svtvp(n, 2.0, tmp_I_Na_K, 1, outarray[18], 1, outarray[18], 1);

    // Na+/Ca2+ exchanger current I_NaCa (numerator built in tmp,
    // denominator in tmp_I_Na_Ca)
    Array<OneD, NekDouble> &tmp_I_Na_Ca = outarray[3];
    // ...
    Vmath::Sadd(n, 1.0, tmp_I_Na_Ca, 1, tmp_I_Na_Ca, 1);
    // ...
    Vmath::Vdiv(n, tmp, 1, tmp_I_Na_Ca, 1, tmp_I_Na_Ca, 1);
    Vmath::Vsub(n, outarray[0], 1, tmp_I_Na_Ca, 1, outarray[0], 1);
    Vmath::Svtvp(n, -3.0, tmp_I_Na_Ca, 1, outarray[16], 1, outarray[16], 1);

    // Sarcolemmal Ca2+ pump current I_p_Ca
    Array<OneD, NekDouble> &tmp_I_p_Ca = outarray[4];
    Vmath::Sadd(n, 0.0005, inarray[17], 1, tmp_I_p_Ca, 1);
    Vmath::Vdiv(n, inarray[17], 1, tmp_I_p_Ca, 1, tmp_I_p_Ca, 1);
    // ...
    Vmath::Vsub(n, outarray[0], 1, tmp_I_p_Ca, 1, outarray[0], 1);

    // ...

    // Transfer current I_tr
    Array<OneD, NekDouble> &tmp_I_tr = outarray[5];
    Vmath::Vsub(n, inarray[20], 1, inarray[19], 1, tmp_I_tr, 1);
    // ...

    // Leak current I_up_leak
    Array<OneD, NekDouble> &tmp_I_up_leak = outarray[6];
    // ...

    // Uptake current I_up
    Array<OneD, NekDouble> &tmp_I_up = outarray[7];
    // ...

    // Release current I_rel
    Array<OneD, NekDouble> &tmp_I_rel = outarray[8];
    Vmath::Vsub(n, inarray[19], 1, inarray[17], 1, tmp_I_rel, 1);
    Vmath::Vmul(n, tmp_I_rel, 1, inarray[13], 1, tmp_I_rel, 1);
    Vmath::Vmul(n, tmp_I_rel, 1, inarray[13], 1, tmp_I_rel, 1);
    Vmath::Vmul(n, tmp_I_rel, 1, inarray[14], 1, tmp_I_rel, 1);
    Vmath::Vmul(n, tmp_I_rel, 1, inarray[15], 1, tmp_I_rel, 1);
    // ...

    // B1 term: net Ca2+ flux entering the intracellular balance
    Array<OneD, NekDouble> &tmp_B1 = outarray[9];
    Vmath::Svtvm(n, 2.0, tmp_I_Na_Ca, 1, tmp_I_p_Ca, 1, tmp_B1, 1);
    Vmath::Vsub(n, tmp_B1, 1, tmp_I_Ca_L, 1, tmp_B1, 1);
    Vmath::Vsub(n, tmp_B1, 1, tmp_I_b_Ca, 1, tmp_B1, 1);
    // ...

    // B2 term
    Array<OneD, NekDouble> &tmp_B2 = outarray[10];
    // ...

    // Intracellular Ca2+ update: outarray[17] = B1 / B2
    Vmath::Vdiv(n, tmp_B1, 1, tmp_B2, 1, outarray[17], 1);

    // Uptake compartment update (outarray[20])
    Vmath::Vsub(n, tmp_I_up, 1, tmp_I_up_leak, 1, outarray[20], 1);
    // ...

    // Update of the last concentration component (outarray[19])
    // ...
    Vmath::Vmul(n, outarray[19], 1, outarray[19], 1, outarray[19], 1);
    // ...
    Vmath::Sadd(n, 1.0, outarray[19], 1, outarray[19], 1);
    Vmath::Vdiv(n, tmp, 1, outarray[19], 1, outarray[19], 1);

    // Gating variables: each loop below writes the steady-state value into an
    // outarray component and the corresponding time constant into m_gates_tau.
    // The loop-variable declarations are not visible in this listing and are
    // assumed here.
    NekDouble alpha, beta;
    const NekDouble *v, *x;
    NekDouble *x_new, *x_tau;
    for (i = 0, v = &inarray[0][0], x = &inarray[1][0], x_new = &outarray[1][0],
        x_tau = &m_gates_tau[0][0];
         i < n; ++i, ++v, ++x, ++x_new, ++x_tau)
    {
        alpha = (*v == (-47.13)) ? 3.2
                                 : (0.32 * (*v + 47.13)) /
                                       (1.0 - exp((-0.1) * (*v + 47.13)));
        beta   = 0.08 * exp(-(*v) / 11.0);
        *x_tau = 1.0 / (alpha + beta);
        *x_new = alpha * (*x_tau);
    }

    for (i = 0, v = &inarray[0][0], x = &inarray[2][0], x_new = &outarray[2][0],
        x_tau = &m_gates_tau[1][0];
         i < n; ++i, ++v, ++x, ++x_new, ++x_tau)
    {
        alpha = (*v >= -40.0) ? 0.0 : 0.135 * exp(-((*v) + 80.0) / 6.8);
        beta  = (*v >= -40.0)
                    ? 1.0 / (0.13 * (1.0 + exp(-(*v + 10.66) / 11.1)))
                    : 3.56 * exp(0.079 * (*v)) + 310000.0 * exp(0.35 * (*v));
        *x_tau = 1.0 / (alpha + beta);
        *x_new = alpha * (*x_tau);
    }

    for (i = 0, v = &inarray[0][0], x = &inarray[3][0], x_new = &outarray[3][0],
        x_tau = &m_gates_tau[2][0];
         i < n; ++i, ++v, ++x, ++x_new, ++x_tau)
    {
        alpha =
            (*v >= -40.0)
                ? 0.0
                : (-127140.0 * exp(0.2444 * (*v)) -
                   3.474e-05 * exp(-0.04391 * (*v))) *
                      (((*v) + 37.78) / (1.0 + exp(0.311 * ((*v) + 79.23))));
        beta = (*v >= -40.0) ? (0.3 * exp(-2.535e-07 * (*v)) /
                                (1.0 + exp(-0.1 * (*v + 32.0))))
                             : 0.1212 * exp(-0.01052 * (*v)) /
                                   (1.0 + exp(-0.1378 * (*v + 40.14)));
        *x_tau = 1.0 / (alpha + beta);
        *x_new = alpha * (*x_tau);
    }

    for (i = 0, v = &inarray[0][0], x = &inarray[4][0], x_new = &outarray[4][0],
        x_tau = &m_gates_tau[3][0];
         i < n; ++i, ++v, ++x, ++x_new, ++x_tau)
    {
        alpha = 0.65 / (exp(-(*v + 10.0) / 8.5) + exp(-(*v - 30.0) / 59.0));
        beta  = 0.65 / (2.5 + exp((*v + 82.0) / 17.0));
        // ... (*x_tau assignment not shown in this listing)
        *x_new = (1.0 / (1.0 + exp(-(*v + 20.47) / 17.54)));
    }

    for (i = 0, v = &inarray[0][0], x = &inarray[5][0], x_new = &outarray[5][0],
        x_tau = &m_gates_tau[4][0];
         i < n; ++i, ++v, ++x, ++x_new, ++x_tau)
    {
        alpha = 1.0 / (18.53 + exp((*v + 113.7) / 10.95));
        beta  = 1.0 / (35.56 + exp(-(*v + 1.26) / 7.44));
        // ... (*x_tau assignment not shown in this listing)
        *x_new = (1.0 / (1.0 + exp((*v + 43.1) / 5.3)));
    }

    for (i = 0, v = &inarray[0][0], x = &inarray[6][0], x_new = &outarray[6][0],
        x_tau = &m_gates_tau[5][0];
         i < n; ++i, ++v, ++x, ++x_new, ++x_tau)
    {
        alpha = 0.65 / (exp(-(*v + 10.0) / 8.5) + exp(-(*v - 30.0) / 59.0));
        beta  = 0.65 / (2.5 + exp((*v + 82.0) / 17.0));
        // ... (*x_tau assignment not shown in this listing)
        *x_new = 1.0 / (1.0 + exp(-(*v + 30.3) / 9.6));
    }

    for (i = 0, v = &inarray[0][0], x = &inarray[7][0], x_new = &outarray[7][0],
        x_tau = &m_gates_tau[6][0];
         i < n; ++i, ++v, ++x, ++x_new, ++x_tau)
    {
        alpha = 1.0 / (21.0 + exp(-(*v - 185.0) / 28.0));
        beta  = exp((*v - 158.0) / 16.0);
        // ... (*x_tau assignment not shown in this listing)
        *x_new = 1.0 / (1.0 + exp((*v - 99.45) / 27.48));
    }

    for (i = 0, v = &inarray[0][0], x = &inarray[8][0], x_new = &outarray[8][0],
        x_tau = &m_gates_tau[7][0];
         i < n; ++i, ++v, ++x, ++x_new, ++x_tau)
    {
        alpha  = 0.0003 * (*v + 14.1) / (1 - exp(-(*v + 14.1) / 5.0));
        beta   = 7.3898e-5 * (*v - 3.3328) / (exp((*v - 3.3328) / 5.1237) - 1.0);
        *x_tau = 1.0 / (alpha + beta);
        *x_new = 1.0 / (1 + exp(-(*v + 14.1) / 6.5));
    }

    for (i = 0, v = &inarray[0][0], x = &inarray[9][0], x_new = &outarray[9][0],
        x_tau = &m_gates_tau[8][0];
         i < n; ++i, ++v, ++x, ++x_new, ++x_tau)
    {
        alpha  = 4e-5 * (*v - 19.9) / (1.0 - exp(-(*v - 19.9) / 17.0));
        beta   = 3.5e-5 * (*v - 19.9) / (exp((*v - 19.9) / 9.0) - 1.0);
        *x_tau = 0.5 / (alpha + beta);
        *x_new = 1.0 / sqrt(1.0 + exp(-(*v - 19.9) / 12.7));
    }

    for (i = 0, v = &inarray[0][0], x = &inarray[10][0],
        x_new = &outarray[10][0], x_tau = &m_gates_tau[9][0];
         i < n; ++i, ++v, ++x, ++x_new, ++x_tau)
    {
        *x_tau = (1 - exp(-(*v + 10.0) / 6.24)) /
                 (0.035 * (*v + 10.0) * (1 + exp(-(*v + 10.0) / 6.24)));
        *x_new = 1.0 / (1.0 + exp(-(*v + 10) / 8.0));
    }

    for (i = 0, v = &inarray[0][0], x = &inarray[11][0],
        x_new = &outarray[11][0], x_tau = &m_gates_tau[10][0];
         i < n; ++i, ++v, ++x, ++x_new, ++x_tau)
    {
        *x_tau =
            9.0 /
            (0.0197 * exp(-0.0337 * 0.0337 * (*v + 10.0) * (*v + 10.0)) + 0.02);
        *x_new = exp((-(*v + 28.0)) / 6.9) / (1.0 + exp((-(*v + 28.0)) / 6.9));
    }

    for (i = 0, v = &inarray[0][0], x = &inarray[12][0],
        x_new = &outarray[12][0], x_tau = &m_gates_tau[11][0];
         i < n; ++i, ++v, ++x, ++x_new, ++x_tau)
    {
        *x_tau = 2.0;
        *x_new = 1.0 / (1.0 + inarray[17][i] / 0.00035);
    }

    // Fn: flux quantity used as the controlling variable for the two
    // release gates updated below
    Array<OneD, NekDouble> &tmp_Fn = outarray[15];
    // ...

    for (i = 0, v = &tmp_Fn[0], x = &inarray[13][0], x_new = &outarray[13][0],
        x_tau = &m_gates_tau[12][0];
         i < n; ++i, ++v, ++x, ++x_new, ++x_tau)
    {
        *x_tau = 8.0;
        *x_new = 1.0 / (1.0 + exp(-(*v - 3.4175e-13) / 1.367e-15));
    }

    for (i = 0, v = &tmp_Fn[0], x = &inarray[14][0], x_new = &outarray[14][0],
        x_tau = &m_gates_tau[13][0];
         i < n; ++i, ++v, ++x, ++x_new, ++x_tau)
    {
        *x_tau = 1.91 + 2.09 / (1.0 + exp(-(*v - 3.4175e-13) / 13.67e-16));
        *x_new = 1.0 - 1.0 / (1.0 + exp(-(*v - 6.835e-14) / 13.67e-16));
    }

    for (i = 0, v = &inarray[0][0], x = &inarray[15][0],
        x_new = &outarray[15][0], x_tau = &m_gates_tau[14][0];
         i < n; ++i, ++v, ++x, ++x_new, ++x_tau)
    {
        *x_tau = 6.0 * (1.0 - exp(-(*v - 7.9) / 5.0)) /
                 (1.0 + 0.3 * exp(-(*v - 7.9) / 5.0)) / (*v - 7.9);
        *x_new = 1.0 - 1.0 / (1.0 + exp(-(*v - 40.0) / 17.0));
    }
}
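Each gating loop above stores a steady-state value through x_new (into the corresponding outarray component) and a time constant through x_tau (into m_gates_tau). For the gates defined via rate constants this is the standard conversion $x_\infty = \alpha/(\alpha+\beta)$, $\tau_x = 1/(\alpha+\beta)$, so that each gate obeys $dx/dt = (x_\infty - x)/\tau_x$. Integrating that relaxation equation is presumably handled by the surrounding cell-model time-stepping code using the stored $x_\infty$ and $\tau_x$ (for example with an exponential, Rush-Larsen-style update); it is not part of the function shown here.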
#define ASSERTL0(condition, msg)
Array< OneD, Array< OneD, NekDouble > > m_gates_tau
Storage for gate tau values.
void Svtsvtp(int n, const T alpha, const T *x, int incx, const T beta, const T *y, int incy, T *z, int incz)
Svtsvtp (scalar times vector plus scalar times vector): z = alpha*x + beta*y.
void Vmul(int n, const T *x, const int incx, const T *y, const int incy, T *z, const int incz)
Multiply vector z = x*y.
void Vlog(int n, const T *x, const int incx, T *y, const int incy)
log y = log(x)
void Vexp(int n, const T *x, const int incx, T *y, const int incy)
exp y = exp(x)
void Svtvp(int n, const T alpha, const T *x, const int incx, const T *y, const int incy, T *z, const int incz)
Svtvp (scalar times vector plus vector): z = alpha*x + y.
void Svtvm(int n, const T alpha, const T *x, const int incx, const T *y, const int incy, T *z, const int incz)
Svtvm (scalar times vector minus vector): z = alpha*x - y.
void Vadd(int n, const T *x, const int incx, const T *y, const int incy, T *z, const int incz)
Add vector z = x+y.
void Smul(int n, const T alpha, const T *x, const int incx, T *y, const int incy)
Scalar multiply y = alpha*x.
void Sdiv(int n, const T alpha, const T *x, const int incx, T *y, const int incy)
Scalar divide y = alpha/x.
void Vdiv(int n, const T *x, const int incx, const T *y, const int incy, T *z, const int incz)
Divide vector z = x/y.
void Zero(int n, T *x, const int incx)
Zero vector.
void Sadd(int n, const T alpha, const T *x, const int incx, T *y, const int incy)
Add vector y = alpha + x.
void Vsub(int n, const T *x, const int incx, const T *y, const int incy, T *z, const int incz)
Subtract vector z = x-y.
void Vpow(int n, const T *x, const int incx, const T f, T *y, const int incy)
pow y = pow(x, f)
scalarT< T > sqrt(scalarT< T > in)
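The Vmath kernels documented above operate element-wise on raw arrays with an explicit stride, and the function body composes them in the same order as the corresponding scalar arithmetic. As a minimal sketch of that pattern (the include path, the helper name ExampleGatedCurrent and the constants g and E are illustrative assumptions, not part of the model above), a generic gated current of the form $I = g\,m^2 h\,(V - E)$ could be evaluated as:

    #include <LibUtilities/BasicUtils/Vmath.hpp> // assumed Nektar++ include path
    #include <vector>

    // Hypothetical helper: I = g * m^2 * h * (V - E), evaluated point-wise.
    void ExampleGatedCurrent(int n, const double *V, const double *m,
                             const double *h, double g, double E, double *I)
    {
        std::vector<double> w(n);
        Vmath::Sadd(n, -E, V, 1, w.data(), 1);          // w = V - E
        Vmath::Vmul(n, m, 1, w.data(), 1, w.data(), 1); // w *= m
        Vmath::Vmul(n, m, 1, w.data(), 1, w.data(), 1); // w *= m (now m^2 (V-E))
        Vmath::Vmul(n, h, 1, w.data(), 1, w.data(), 1); // w *= h
        Vmath::Smul(n, g, w.data(), 1, I, 1);           // I = g * w
    }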