/*** Generated by Spin Version 6.5.2 -- 6 December 2019 ***/
/*** From source: my_philosopher.pml ***/

#ifdef SC
#define _FILE_OFFSET_BITS	64
#endif
/* NOTE(review): the header names below were lost in a bad text
 * extraction ("#include" with no argument); restored to the standard
 * Spin 6.5.2 pan.c include list, which matches the original count
 * (7, then 1/2 under WIN32/WIN64, then 4). Verify against a freshly
 * generated pan.c if possible.
 */
#include <stdio.h>
#include <signal.h>
#include <stdlib.h>
#include <stdarg.h>
#include <string.h>
#include <ctype.h>
#include <errno.h>
#if defined(WIN32) || defined(WIN64)
#include <time.h>
#else
#include <unistd.h>
#include <sys/times.h>
#endif
#include <sys/types.h>
#include <sys/stat.h>
#include <fcntl.h>
#include <assert.h>

#define Offsetof(X, Y)	((ulong)(&(((X *)0)->Y)))
#ifndef max
#define max(a,b) (((a)<(b)) ? (b) : (a))
#endif
#ifndef PRINTF
int Printf(const char *fmt, ...); /* prototype only */
#endif
#include "pan.h"

char *TrailFile = PanSource; /* default */
char *trailfilename;
#ifdef LOOPSTATE
double cnt_loops;
#endif
State	A_Root;	/* seed-state for cycles */
State	now;	/* the full state-vector */
#if NQS > 0
short q_flds[NQS+1];
short q_max[NQS+1];
#endif
#ifndef XUSAFE
uchar q_claim[MAXQ+1];
char *q_name[MAXQ+1];
char *p_name[MAXPROC+1];
#endif
#undef C_States
#if defined(C_States) && (HAS_TRACK==1)
void
c_update(uchar *p_t_r)
{
#ifdef VERBOSE
	printf("c_update %p\n", p_t_r);
#endif
}
void
c_revert(uchar *p_t_r)
{
#ifdef VERBOSE
	printf("c_revert %p\n", p_t_r);
#endif
}
#endif
void
globinit(void)
{
}
#if VECTORSZ>32000
extern int
#else
extern short
#endif
	*proc_offset, *q_offset;
void
locinit1(int h)
{
}
void
locinit0(int h)
{
}
#ifdef RANDOMIZE
#define T_RAND	RANDOMIZE
#endif
#ifdef CNTRSTACK
#define onstack_now()	(LL[trpt->j6] && LL[trpt->j7])
#define onstack_put()	LL[trpt->j6]++; LL[trpt->j7]++
#define onstack_zap()	LL[trpt->j6]--; LL[trpt->j7]--
#endif
#if !defined(SAFETY) && !defined(NOCOMP)
#define V_A	(((now._a_t&1)?2:1) << (now._a_t&2))
#define A_V	(((now._a_t&1)?1:2) << (now._a_t&2))
int	S_A = 0;
#else
#define V_A	0
#define A_V	0
#define S_A	0
#endif
#ifdef MA
#undef onstack_now
#undef onstack_put
#undef onstack_zap
#define onstack_put()	;
#define onstack_zap()	g_store((char *) &now, vsize, 4)
#else
#if defined(FULLSTACK) && !defined(BITSTATE)
#define onstack_put()	trpt->ostate = Lstate
#define onstack_zap()	{ \
	if (trpt->ostate) \
trpt->ostate->tagged = \ (S_A)? (trpt->ostate->tagged&~V_A) : 0; \ } #endif #endif H_el **H_tab, **S_Tab; /* #ifndef BFS_PAR */ H_el *Lstate; /* #endif */ Trail *trail, *trpt; FILE *efd; uchar *_this; long maxdepth=10000; long omaxdepth=10000; #ifdef BCS /* bitflags in trpt->bcs */ #define B_PHASE1 1 #define B_PHASE2 2 #define B_FORCED 4 int sched_max = 0; #endif double quota; /* time limit */ #if NCORE>1 long z_handoff = -1; #endif #ifdef SC char *stackfile; #endif uchar *SS, *LL; uchar reversing = 0; uchar HASH_NR = 0; double memcnt = (double) 0; double memlim = (double) (1<<30); /* 1 GB */ #if NCORE>1 double mem_reserved = (double) 0; #endif /* for emalloc: */ static char *have; static long left = 0L; static double fragment = (double) 0; static ulong grow; unsigned int HASH_CONST[] = { /* generated by hashgen 421 -- assumes 4 bytes per int */ 0x100d4e63, 0x0fc22f87, 0xa7155c77, 0x78f2c3b9, 0xde32d207, 0xc27d305b, 0x1bb3fb2b, 0x2798c7a5, 0x9c675ffd, 0x777d9081, 0x07aef2f1, 0xae08922f, 0x5bd365b7, 0xed51c47b, 0x9b5aeea1, 0xbcc9d431, 0x396d8fff, 0xa2fd1367, 0x08616521, 0x5e84991f, 0x87495bc5, 0x2930039b, 0xceb6a593, 0xfe522d63, 0x7ff60baf, 0xf89b1fbf, 0x74c01755, 0xe0c559bf, 0x3669fc47, 0x8756d3bf, 0x14f78445, 0x24c41779, 0x0af7b129, 0xde22368d, 0x3e1c01e3, 0xaf773e49, 0x5b762459, 0x86d12911, 0x0953a3af, 0xb66dc23d, 0x96b3bd4f, 0x19b1dd51, 0xd886fbc3, 0xa7f3a025, 0xccb48e63, 0x87d8f027, 0x2bea270d, 0xdb0e9379, 0x78c09f21, 0x0cbbfe07, 0xea4bc7c3, 0x5bfbc3c9, 0x3c6e53fd, 0xab320cdd, 0x31041409, 0x416e7485, 0xe41d75fd, 0xc3c5060f, 0x201a9dc1, 0x93dee72b, 0x6461305f, 0xc571dec5, 0xa1fd21c5, 0xfb421ce1, 0x7f024b05, 0xfa518819, 0x6c9777fb, 0x0d4d9351, 0x08b33861, 0xccb9d0f3, 0x34112791, 0xe962d7c9, 0x8d742211, 0xcd9c47a1, 0x64437b69, 0x5fe40feb, 0x806113cb, 0x10e1d593, 0x821851b3, 0x057a1ff3, 0x8ededc0b, 0x90dd5b31, 0x635ff359, 0x68dbcd35, 0x1050ff4f, 0xdbb07257, 0x486336db, 0x83af1e75, 0x432f1799, 0xc1d0e7e7, 0x21f4eb5b, 0x881ec2c1, 0x23f3b539, 0x6cdfb80d, 0x71d474cf, 
0x97d5d4a9, 0xf721d2e5, 0xb5ff3711, 0x3f2e58cd, 0x4e06e3d9, 0x7d711739, 0x927887df, 0x7d57ad71, 0x232eb767, 0xe3f5cc51, 0x6576b443, 0xed17bf1f, 0x8828b637, 0xc940f6ab, 0xc7b830ef, 0x11ed8a13, 0xaff20949, 0xf28a8465, 0x0da10cf9, 0xb512497d, 0x44accae1, 0x95e0929f, 0xe08c8901, 0xfd22d6c9, 0xb6a5c029, 0xaadb428d, 0x6e8a453d, 0x3d5c0195, 0x8bf4ae39, 0xbf83ab19, 0x3e9dac33, 0xc4df075d, 0x39472d71, 0xb8647725, 0x1a6d4887, 0x78a03577, 0xafd76ef7, 0xc1a1d6b3, 0x1afb33c5, 0x87896299, 0x5cc992ef, 0x7f805d0d, 0x089a039b, 0xa353cc27, 0x57b296b3, 0x52badec9, 0xc916e431, 0x09171957, 0x14996d51, 0xe87e32c7, 0xb4fdbb5d, 0xdd216a03, 0x4ddd3fff, 0x767d5c57, 0x79c97509, 0xab70543b, 0xc5feca4f, 0x8eb37b89, 0x20a2cefd, 0xf4b00b91, 0xf166593d, 0x7bf50f65, 0x753e6c8b, 0xfb5b81dd, 0xf2d45ef5, 0x9741c04f, 0x300da48d, 0x01dc4121, 0xa112cd47, 0x0223b24b, 0xa89fbce7, 0x681e1f7b, 0xe7c6aedf, 0x1fd3d523, 0x561ba723, 0xf54042fb, 0x1a516751, 0xcd085bd5, 0xe74246d5, 0x8b170b5d, 0x249985e9, 0x5b4d9cf7, 0xe9912323, 0x5fc0f339, 0x41f8f051, 0x8a296fb1, 0x62909f51, 0x2c05d695, 0x095efccb, 0xa91574f1, 0x0f5cc6c3, 0x23a2ca2b, 0xc6053ec1, 0xeb19e081, 0x3d1b3997, 0xb0c5f3cd, 0xe5d85b35, 0x1cb1bdf1, 0x0c8f278f, 0x518249c3, 0x9f61b68d, 0xade0919d, 0x779e29c3, 0xdbac9485, 0x2ce149a9, 0x254c2409, 0x205b34fb, 0xc8ab1a89, 0x6b4a2585, 0x2303d94b, 0x8fa186b9, 0x49826da5, 0xd23a37ad, 0x680b18c9, 0xa46fbd7f, 0xe42c2cf9, 0xf7cfcb5f, 0xb4842b8b, 0xe483780d, 0x66cf756b, 0x3eb73781, 0x41ca17a5, 0x59f91b0f, 0x92fb67d9, 0x0a5c330f, 0x46013fdb, 0x3b0634af, 0x9024f533, 0x96a001a7, 0x15bcd793, 0x3a311fb1, 0x78913b8b, 0x9d4a5ddf, 0x33189b31, 0xa99e8283, 0xf7cb29e9, 0x12d64a27, 0xeda770ff, 0xa7320297, 0xbd3c14a5, 0x96d0156f, 0x0115db95, 0x7f79f52b, 0xa6d52521, 0xa063d4bd, 0x9cb5e039, 0x42cf8195, 0xcb716835, 0x1bc21273, 0x5a67ad27, 0x4b3b0545, 0x162cda67, 0x0489166b, 0x85fd06a9, 0x562b037d, 0x995bc1f3, 0xe144e78b, 0x1e749f69, 0xa36df057, 0xcfee1667, 0x8c4116b7, 0x94647fe3, 0xe6899df7, 0x6d218981, 0xf1069079, 0xd1851a33, 
0xf424fc83, 0x24467005, 0xad8caf59, 0x1ae5da13, 0x497612f9, 0x10f6d1ef, 0xeaf4ff05, 0x405f030b, 0x693b041d, 0x2065a645, 0x9fec71b3, 0xc3bd1b0f, 0xf29217a3, 0x0f25e15d, 0xd48c2b97, 0xce8acf2d, 0x0629489b, 0x1a5b0e01, 0x32d0c059, 0x2d3664bf, 0xc45f3833, 0xd57f551b, 0xbdd991c5, 0x9f97da9f, 0xa029c2a9, 0x5dd0cbdf, 0xe237ba41, 0x62bb0b59, 0x93e7d037, 0x7e495619, 0x51b8282f, 0x853e8ef3, 0x9b8abbeb, 0x055f66f9, 0x2736f7e5, 0x8d7e6353, 0x143abb65, 0x4e2bb5b3, 0x872e1adf, 0x8fcac853, 0xb7cf6e57, 0x12177f3d, 0x1d2da641, 0x07856425, 0xc0ed53dd, 0x252271d9, 0x79874843, 0x0aa8c9b5, 0x7e804f93, 0x2d080e09, 0x3929ddfd, 0x36433dbd, 0xd6568c17, 0xe624e939, 0xb33189ef, 0x29e68bff, 0x8aae2433, 0xe6335499, 0xc5facd9d, 0xbd5afc65, 0x7a584fa7, 0xab191435, 0x64bbdeef, 0x9f5cd8e1, 0xb3a1be05, 0xbd0c1753, 0xb00e2c7f, 0x6a96e315, 0x96a31589, 0x660af5af, 0xc0438d43, 0x17637373, 0x6460e8df, 0x7d458de9, 0xd76b923f, 0x316f045f, 0x3ccbd035, 0x63f64d81, 0xd990d969, 0x7c860a93, 0x99269ff7, 0x6fbcac8f, 0xd8cc562b, 0x67141071, 0x09f85ea3, 0x1298f2dd, 0x41fa86e5, 0xce1d7cf5, 0x6b232c9d, 0x8f093d4b, 0x3203ad4b, 0x07d70d5f, 0x38c44c75, 0x0887c9ef, 0x1833acf5, 0xa3607f85, 0x7d367573, 0x0ea4ffc3, 0xad2d09c1, 0x7a1e664f, 0xef41dff5, 0x03563491, 0x67f30a1f, 0x5ce5f9ef, 0xa2487a27, 0xe5077957, 0x9beb36fd, 0x16e41251, 0x216799ef, 0x07181f8d, 0xc191c3cf, 0xba21cab5, 0x73944eb7, 0xdf9eb69d, 0x5fef6cfd, 0xd750a6f5, 0x04f3fa43, 0x7cb2d063, 0xd3bdb369, 0x35f35981, 0x9f294633, 0x5e293517, 0x70e51d05, 0xf8db618d, 0x66ee05db, 0x835eaa77, 0x166a02c3, 0xb516f283, 0x94102293, 0x1ace50a5, 0x64072651, 0x66df7b75, 0x02e1b261, 0x8e6a73b9, 0x19dddfe7, 0xd551cf39, 0x391c17cb, 0xf4304de5, 0xcd67b8d1, 0x25873e8d, 0x115b4c71, 0x36e062f3, 0xaec0c7c9, 0xd929f79d, 0x935a661b, 0xda762b47, 0x170bd76b, 0x1a955cb5, 0x341fb0ef, 0x7f366cef, 0xc98f60c7, 0xa4181af3, 0xa94a8837, 0x5fa3bc43, 0x11c638c1, 0x4e66fabb, 0x30ab85cf, 0x250704ef, 0x8bf3bc07, 0x6d2cd5ab, 0x613ef9c3, 0xb8e62149, 0x0404fd91, 0xa04fd9b1, 0xa5e389eb, 0x9543bd23, 
0xad6ca1f9, 0x210c49ab, 0xf8e9532b, 0x854fba89, 0xdc7fc6bb, 0x48a051a7, 0x6b2f383b, 0x61a4b433, 0xd3af231b, 0xc5023fc7, 0xa5aa85df, 0xa0cd1157, 0x4206f64d, 0x3fea31c3, 0x62d510a1, 0x13988957, 0x6a11a033, 0x46f2a3b7, 0x2784ef85, 0x229eb9eb, 0x9c0c3053, 0x5b7ead39, 0x82ae9afb, 0xf99e9fb3, 0x914b6459, 0xaf05edd7, 0xc82710dd, 0x8fc1ea1f, 0x7e0d7a8d, 0x7c7592e9, 0x65321017, 0xea57553f, 0x4aeb49ff, 0x5239ae4d, 0x4b4b4585, 0x94091c21, 0x7eaaf4cb, 0x6b489d6f, 0xecb9c0c3, 0x29a7af63, 0xaf117a0d, 0x969ea6cd, 0x7658a34d, 0x5fc0bba9, 0x26e99b7f, 0x7a6f260f, 0xe37c34f1, 0x1a1569bb, 0xc3bc7371, 0x8567543d, 0xad0c46a9, 0xa1264fd9, 0x16f10b29, 0x5e00dd3b, 0xf85b6bcd, 0xa2d32d8b, 0x4a3c8d43, 0x6b33b959, 0x4fd1e6c9, 0x7938b8a9, 0x1ec795c7, 0xe2ef3409, 0x83c16b9d, 0x0d3fd9eb, 0xeb461ad7, 0xb09c831d, 0xaf052001, 0x7911164d, 0x1a9dc191, 0xb52a0815, 0x0f732157, 0xc68c4831, 0x12cf3cbb }; #if NCORE>1 extern int core_id; #endif long mreached=0; ulong errors=0; int done=0, Nrun=1; int c_init_done=0; char *c_stack_start = (char *) 0; double nstates=0, nlinks=0, truncs=0, truncs2=0; double nlost=0, nShadow=0, hcmp=0, ngrabs=0; #ifdef BFS_PAR extern ulong bfs_punt; #endif #ifdef PUTPID char *progname; #endif #if defined(ZAPH) && defined(BITSTATE) double zstates = 0; #endif /* int c_init_run; */ #ifdef REVERSE #define P_REVERSE #endif #ifdef T_REVERSE int t_reverse = 1; #else int t_reverse = 0; #endif #ifdef BFS double midrv=0, failedrv=0, revrv=0; #endif ulong nr_states=0; /* nodes in DFA */ long Fa=0, Fh=0, Zh=0, Zn=0; long PUT=0, PROBE=0, ZAPS=0; long Ccheck=0, Cholds=0; int a_cycles=0, upto=1, strict=0, verbose = 0, signoff = 0; #ifdef HAS_CODE int gui = 0, coltrace = 0, readtrail = 0; int whichtrail = 0, whichclaim = -1, onlyproc = -1, silent = 0; char *claimname; #endif int state_tables=0, fairness=0, no_rck=0, Nr_Trails=0, dodot=0; char simvals[256]; #ifndef INLINE int TstOnly=0; #endif ulong mask, nmask; #ifdef BITSTATE int ssize=27; /* 16 Mb */ #else int ssize=24; /* 16M slots */ 
#endif int hmax=0, svmax=0, smax=0; int Maxbody=0, XX; uchar *noptr, *noqptr; /* used by Pptr(x) and Qptr(x) */ #ifdef VAR_RANGES void logval(char *, int); void dumpranges(void); #endif #ifdef MA #define INLINE_REV extern void dfa_init(unsigned short); extern int dfa_member(ulong); extern int dfa_store(uchar *); unsigned int maxgs = 0; #endif #ifdef ALIGNED State comp_now __attribute__ ((aligned (8))); /* gcc 64-bit aligned for Itanium2 systems */ /* MAJOR runtime penalty if not used on those systems */ #else State comp_now; /* compressed state vector */ #endif #ifndef HC #ifdef BFS_PAR State tmp_msk; #endif State comp_msk; uchar *Mask = (uchar *) &comp_msk; #endif #ifdef COLLAPSE State comp_tmp; static char *scratch = (char *) &comp_tmp; #endif _Stack *stack; /* for queues, processes */ Svtack *svtack; /* for old state vectors */ #ifdef BITSTATE static unsigned int hfns = 3; /* new default */ #endif static ulong j1_spin, j2_spin; /* 5.2.1: avoid nameclash with math.h */ static ulong j3_spin, j4_spin; ulong K1, K2; #ifdef BITSTATE long udmem; #endif #ifndef BFS_PAR static long A_depth = 0; #endif long depth = 0; long depthfound = -1; /* loop detection */ #if NCORE>1 long nr_handoffs = 0; #endif uchar warned = 0, iterative = 0, exclusive = 0, like_java = 0, every_error = 0; uchar noasserts = 0, noends = 0, bounded = 0; uint s_rand = 12345; /* default seed */ #if SYNC>0 && ASYNC==0 void set_recvs(void); int no_recvs(int); #endif #if SYNC #define IfNotBlocked if (boq != -1) continue; #define UnBlock boq = -1 #else #define IfNotBlocked /* cannot block */ #define UnBlock /* don't bother */ #endif #ifdef BITSTATE int (*b_store)(char *, int); int bstore_reg(char *, int); int bstore_mod(char *, int); #endif void dfs_uerror(char *); void dfs_Uerror(char *); #ifdef BFS_PAR void bfs_uerror(char *); void bfs_Uerror(char *); #endif void (*uerror)(char *); void (*Uerror)(char *); void (*hasher)(uchar *, int); void (*o_hash)(uchar *, int, int); void d_hash(uchar *, int); void 
m_hash(uchar *, int); void d_sfh(uchar *, int); void o_hash32(uchar *, int, int); void o_hash64(uchar *, int, int); void active_procs(void); void cleanup(void); void do_the_search(void); void find_shorter(int); void iniglobals(int); void stopped(int); void wrapup(void); int *grab_ints(int); void ungrab_ints(int *, int); #ifdef COLLAPSE #if (NCORE>1 && !defined(SEP_STATE)) || defined(BFS_PAR) volatile ulong *ncomps; /* in shared memory */ #else ulong ncomps[256+2]; #endif #endif Trans ***trans; /* 1 ptr per state per proctype */ #if VECTORSZ>32000 int P_o[MAXPROC], P_o_tmp[MAXPROC+1]; int Q_o[MAXQ], Q_o_tmp[MAXPROC+1]; int *proc_offset = (int *) P_o; int *q_offset = (int *) Q_o; #else short P_o[MAXPROC], P_o_tmp[MAXPROC+1]; short Q_o[MAXQ], Q_o_tmp[MAXPROC+1]; short *proc_offset = (short *) P_o; short *q_offset = (short *) Q_o; #endif uchar P_s[MAXPROC+1], P_s_tmp[MAXPROC+1]; uchar Q_s[MAXQ+1], Q_s_tmp[MAXQ+1]; uchar *proc_skip = (uchar *) P_s; uchar *q_skip = (uchar *) Q_s; #ifdef TRIX TRIX_v6 *freebodies; TRIX_v6 *processes[MAXPROC+1]; TRIX_v6 *channels[MAXQ+1]; long _p_count[MAXPROC]; long _c_count[MAXPROC]; #endif ulong vsize; /* vector size in bytes */ #ifdef SVDUMP int vprefix=0, svfd; /* runtime option -pN */ #endif char *tprefix = "trail"; /* runtime option -tsuffix */ short boq = -1; /* blocked_on_queue status */ int _; /* predefined write-only variable */ #ifdef PEG long peg[NTRANS]; #endif #ifndef NOBOUNDCHECK #define Index(x, y) Boundcheck(x, y, II, tt, t) #else #define Index(x, y) x #endif short src_ln1 [] = { 0, 60, 60, 60, 61, 61, 59, 63, 59, 63, 67, 67, 67, 68, 68, 66, 70, 66, 65, 71, 0, }; S_F_MAP src_file1 [] = { { "-", 0, 0 }, { "my_philosopher.pml", 1, 19 }, { "-", 20, 21 } }; uchar reached1 [] = { 0, 1, 0, 0, 1, 1, 0, 1, 1, 0, 1, 0, 0, 1, 1, 0, 1, 1, 0, 0, 0, }; uchar *loopstate1; short src_ln0 [] = { 0, 23, 25, 26, 24, 29, 30, 28, 33, 34, 35, 36, 37, 38, 40, 41, 39, 44, 45, 43, 48, 49, 50, 51, 52, 22, 54, 22, 54, 0, }; S_F_MAP src_file0 [] = { 
{ "-", 0, 0 }, { "my_philosopher.pml", 1, 28 }, { "-", 29, 30 } }; uchar reached0 [] = { 0, 1, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, }; uchar *loopstate0; uchar reached2[3]; /* np_ */ uchar *loopstate2; /* np_ */ struct { int tp; short *src; } src_all[] = { { 1, &src_ln1[0] }, { 0, &src_ln0[0] }, { 0, (short *) 0 } }; S_F_MAP *flref[] = { src_file1, src_file0 }; struct { char *c; char *t; } code_lookup[] = { { (char *) 0, "" } }; short Air[] = { (short) Air0, (short) Air1, (short) Air2 }; char *procname[] = { "Philosopher", ":init:", ":np_:", 0 }; enum btypes { NONE=0, N_CLAIM=1, I_PROC=2, A_PROC=3, P_PROC=4, E_TRACE=5, N_TRACE=6 }; int Btypes[] = { 4, /* Philosopher */ 2, /* :init: */ 0 /* :np_: */ }; uchar spin_c_typ[NCLAIMS]; /* claim-types */ uchar *accpstate[3]; uchar *progstate[3]; uchar *loopstate[3]; uchar *reached[3]; uchar *stopstate[3]; uchar *visstate[3]; short *mapstate[3]; #ifdef HAS_CODE int NrStates[3]; #endif #ifdef TRIX int what_p_size(int); int what_q_size(int); void re_mark_all(int whichway) { int j; #ifdef V_TRIX printf("%d: re_mark_all channels %d\n", depth, whichway); #endif #ifndef BFS for (j = 0; j < now._nr_qs; j++) channels[j]->modified = 1; /* channel index moved */ #endif #ifndef TRIX_ORIG if (whichway > 0) { for (j = now._nr_pr + now._nr_qs - 1; j >= now._nr_pr; j--) now._ids_[j] = now._ids_[j-1]; } else { for (j = now._nr_pr; j < now._nr_pr + now._nr_qs; j++) now._ids_[j] = now._ids_[j+1]; } #endif } #endif #ifdef BFS_PAR void bfs_prepmask(int caller) { #if !defined(NOCOMP) && !defined(HC) memcpy((char *) &tmp_msk, (const char *) Mask, sizeof(State)); Mask = (uchar *) &tmp_msk; #endif switch (caller) { case 1: /* addproc */ #if VECTORSZ>32000 memcpy((char *) P_o_tmp, (const char *) proc_offset, MAXPROC*sizeof(int)); #else memcpy((char *) P_o_tmp, (const char *) proc_offset, MAXPROC*sizeof(short)); #endif memcpy((char *) P_s_tmp, (const char *) proc_skip, MAXPROC*sizeof(uchar)); proc_offset 
= P_o_tmp; proc_skip = (uchar *) &P_s_tmp[0]; break; case 2: /* addqueue */ #if VECTORSZ>32000 memcpy((char *) Q_o_tmp, (const char *) q_offset, MAXQ*sizeof(int)); #else memcpy((char *) Q_o_tmp, (const char *) q_offset, MAXQ*sizeof(short)); #endif memcpy((char *) Q_s_tmp, (const char *) q_skip, MAXQ*sizeof(uchar)); q_offset = Q_o_tmp; q_skip = (uchar *) &Q_s_tmp[0]; break; case 3: /* no nothing */ break; default: /* cannot happen */ Uerror("no good"); break; } } typedef struct BFS_saves BFS_saves; struct BFS_saves { char *m; BFS_saves *nxt; } *bfs_save_po, *bfs_save_ps, #if !defined(NOCOMP) && !defined(HC) *bfs_save_mask, #endif *bfs_save_qo, *bfs_save_qs; extern volatile uchar *sh_malloc(ulong); static int bfs_runs; /* 0 before local heaps are initialized */ void bfs_swoosh(BFS_saves **where, char **what, int howmuch) { BFS_saves *m; for (m = *where; m; m = m->nxt) { if (memcmp(m->m, *what, howmuch) == 0) { *what = m->m; return; } } m = (BFS_saves *) emalloc(sizeof(BFS_saves)); if (bfs_runs) { m->m = (char *) sh_malloc(howmuch); } else { m->m = (char *) sh_pre_malloc(howmuch); } memcpy(m->m, *what, howmuch); m->nxt = *where; *where = m; *what = m->m; } void bfs_fixmask(int caller) { #if !defined(NOCOMP) && !defined(HC) bfs_swoosh(&bfs_save_mask, (char **) &Mask, sizeof(State)); #endif #ifndef TRIX switch (caller) { case 1: /* addproc */ #if VECTORSZ>32000 bfs_swoosh(&bfs_save_po, (char **) &proc_offset, MAXPROC*sizeof(int)); #else bfs_swoosh(&bfs_save_po, (char **) &proc_offset, MAXPROC*sizeof(short)); #endif bfs_swoosh(&bfs_save_ps, (char **) &proc_skip, MAXPROC*sizeof(uchar)); break; case 2: /* addqueue */ #if VECTORSZ>32000 bfs_swoosh(&bfs_save_qo, (char **) &q_offset, MAXQ*sizeof(int)); #else bfs_swoosh(&bfs_save_qo, (char **) &q_offset, MAXQ*sizeof(short)); #endif bfs_swoosh(&bfs_save_qs, (char **) &q_skip, MAXQ*sizeof(uchar)); break; case 3: /* do nothing */ break; default: Uerror("double plus ungood"); break; } #endif } #endif int addproc(int calling_pid, 
int priority, int n, int par0) { int j = 0, h = now._nr_pr; #ifndef NOCOMP int k; #endif uchar *o_this = _this; #ifndef INLINE if (TstOnly) return (h < MAXPROC); #endif #ifndef NOBOUNDCHECK /* redefine Index only within this procedure */ #undef Index #define Index(x, y) Boundcheck(x, y, 0, 0, 0) #endif if (h >= MAXPROC) Uerror("too many processes"); #ifdef V_TRIX printf("%4d: add process %d\n", depth, h); #endif switch (n) { case 0: j = sizeof(P0); break; case 1: j = sizeof(P1); break; case 2: j = sizeof(P2); break; default: Uerror("bad proc - addproc"); } #ifdef BFS_PAR bfs_prepmask(1); /* addproc */ #endif #ifdef TRIX vsize += sizeof(H_el *); #else if (vsize%WS) proc_skip[h] = WS-(vsize%WS); else proc_skip[h] = 0; #if !defined(NOCOMP) && !defined(HC) for (k = vsize + (int) proc_skip[h]; k > vsize; k--) Mask[k-1] = 1; /* align */ #endif vsize += (int) proc_skip[h]; proc_offset[h] = vsize; vsize += j; #if defined(SVDUMP) && defined(VERBOSE) if (vprefix > 0) { int dummy = 0; write(svfd, (uchar *) &dummy, sizeof(int)); /* mark */ write(svfd, (uchar *) &h, sizeof(int)); write(svfd, (uchar *) &n, sizeof(int)); #if VECTORSZ>32000 write(svfd, (uchar *) &proc_offset[h], sizeof(int)); write(svfd, (uchar *) &now, vprefix-4*sizeof(int)); /* padd */ #else write(svfd, (uchar *) &proc_offset[h], sizeof(short)); write(svfd, (uchar *) &now, vprefix-3*sizeof(int)-sizeof(short)); /* padd */ #endif } #endif #endif now._nr_pr += 1; #if defined(BCS) && defined(CONSERVATIVE) if (now._nr_pr >= CONSERVATIVE*8) { printf("pan: error: too many processes -- recompile with "); printf("-DCONSERVATIVE=%d\n", CONSERVATIVE+1); pan_exit(1); } #endif if (fairness && ((int) now._nr_pr + 1 >= (8*NFAIR)/2)) { printf("pan: error: too many processes -- current"); printf(" max is %d procs (-DNFAIR=%d)\n", (8*NFAIR)/2 - 2, NFAIR); printf("\trecompile with -DNFAIR=%d\n", NFAIR+1); pan_exit(1); } #ifndef NOVSZ now._vsz = vsize; #endif hmax = max(hmax, vsize); #ifdef TRIX #ifndef BFS if (freebodies) { 
processes[h] = freebodies; freebodies = freebodies->nxt; } else { processes[h] = (TRIX_v6 *) emalloc(sizeof(TRIX_v6)); processes[h]->body = (uchar *) emalloc(Maxbody * sizeof(char)); } processes[h]->modified = 1; /* addproc */ #endif processes[h]->psize = j; processes[h]->parent_pid = calling_pid; processes[h]->nxt = (TRIX_v6 *) 0; #else #if !defined(NOCOMP) && !defined(HC) for (k = 1; k <= Air[n]; k++) Mask[vsize - k] = 1; /* pad */ Mask[vsize-j] = 1; /* _pid */ #endif #ifdef BFS_PAR bfs_fixmask(1); /* addproc */ #endif if (vsize >= VECTORSZ) { printf("pan: error, VECTORSZ too small, recompile pan.c"); printf(" with -DVECTORSZ=N with N>%d\n", (int) vsize); Uerror("aborting"); } #endif memset((char *)pptr(h), 0, j); _this = pptr(h); if (BASE > 0 && h > 0) { ((P0 *)_this)->_pid = h-BASE; } else { ((P0 *)_this)->_pid = h; } switch (n) { case 2: /* np_ */ ((P2 *)pptr(h))->_t = 2; ((P2 *)pptr(h))->_p = 0; #ifdef HAS_PRIORITY ((P2 *)pptr(h))->_priority = priority; #endif reached2[0] = 1; accpstate[2][1] = 1; break; case 1: /* :init: */ ((P1 *)pptr(h))->_t = 1; ((P1 *)pptr(h))->_p = 6; #ifdef HAS_PRIORITY ((P1 *)pptr(h))->_priority = priority; /* was: 1 */ #endif reached1[6]=1; /* params: */ /* locals: */ ((P1 *)pptr(h))->k = 0; #ifdef VAR_RANGES logval(":init::k", ((P1 *)pptr(h))->k); #endif #ifdef HAS_CODE locinit1(h); #endif break; case 0: /* Philosopher */ ((P0 *)pptr(h))->_t = 0; ((P0 *)pptr(h))->_p = 25; #ifdef HAS_PRIORITY ((P0 *)pptr(h))->_priority = priority; /* was: 1 */ #endif reached0[25]=1; /* params: */ ((P0 *)pptr(h))->i = par0; /* locals: */ #ifdef VAR_RANGES logval("Philosopher:i", ((P0 *)pptr(h))->i); #endif #ifdef HAS_CODE locinit0(h); #endif break; } _this = o_this; #ifdef TRIX re_mark_all(1); /* addproc */ #endif return h-BASE; #ifndef NOBOUNDCHECK #undef Index #define Index(x, y) Boundcheck(x, y, II, tt, t) #endif } #if defined(BITSTATE) && defined(COLLAPSE) /* just to allow compilation, to generate the error */ long col_p(int i, char *z) { return 
0; } long col_q(int i, char *z) { return 0; } #endif #ifndef BITSTATE #ifdef COLLAPSE long col_p(int i, char *z) { int j, k; ulong ordinal(char *, long, short); char *x, *y; P0 *ptr = (P0 *) pptr(i); switch (ptr->_t) { case 0: j = sizeof(P0); break; case 1: j = sizeof(P1); break; case 2: j = sizeof(P2); break; default: Uerror("bad proctype - collapse"); } if (z) x = z; else x = scratch; y = (char *) ptr; k = proc_offset[i]; #if !defined(NOCOMP) && !defined(HC) for ( ; j > 0; j--, y++) if (!Mask[k++]) *x++ = *y; #else memcpy(x, y, j); x += j; #endif for (j = 0; j < WS-1; j++) *x++ = 0; x -= j; if (z) return (long) (x - z); return ordinal(scratch, x-scratch, (short) (2+ptr->_t)); } #endif #endif #ifdef INIT_STATE void init_state(State state){ state._nr_pr = 0; state._nr_qs = 0; state._a_t = 0; #ifndef NOFAIR memset(&state._cnt, 0, sizeof(state._cnt)); #endif #ifndef NOVSZ state._vsz = 0; #endif #ifdef HAS_LAST state._last = 0; #endif #if defined(BITSTATE) && defined(BCS) && defined(STORE_CTX) state._ctx = 0; #endif #if defined(BFS_PAR) && defined(L_BOUND) state._l_bnd = 0; state._l_sds = 0; #endif #ifdef EVENT_TRACE state_event = 0; #endif #ifdef TRIX memset(&state._ids_, 0, sizeof(state._ids_)); #else memset(&state.sv, 0, sizeof(state.sv)); #endif } #endif void run(void) { /* int i; */ #ifdef INIT_STATE init_state(now); #else memset((char *)&now, 0, sizeof(State)); #endif vsize = (ulong) (sizeof(State) - VECTORSZ); #ifndef NOVSZ now._vsz = vsize; #endif #ifdef TRIX if (VECTORSZ != sizeof(now._ids_)) { printf("VECTORSZ is %d, but should be %d in this mode\n", VECTORSZ, (int) sizeof(now._ids_)); Uerror("VECTORSZ set incorrectly, recompile Spin (not pan.c)"); } #endif /* optional provisioning statements, e.g. 
to */ /* set hidden variables, used as constants */ #ifdef PROV #include PROV #endif settable(); Maxbody = max(Maxbody, ((int) sizeof(P0))); Maxbody = max(Maxbody, ((int) sizeof(P1))); Maxbody = max(Maxbody, ((int) sizeof(P2))); reached[0] = reached0; reached[1] = reached1; reached[2] = reached2; accpstate[0] = (uchar *) emalloc(_nstates0); accpstate[1] = (uchar *) emalloc(_nstates1); accpstate[2] = (uchar *) emalloc(_nstates2); progstate[0] = (uchar *) emalloc(_nstates0); progstate[1] = (uchar *) emalloc(_nstates1); progstate[2] = (uchar *) emalloc(_nstates2); loopstate0 = loopstate[0] = (uchar *) emalloc(_nstates0); loopstate1 = loopstate[1] = (uchar *) emalloc(_nstates1); loopstate2 = loopstate[2] = (uchar *) emalloc(_nstates2); stopstate[0] = (uchar *) emalloc(_nstates0); stopstate[1] = (uchar *) emalloc(_nstates1); stopstate[2] = (uchar *) emalloc(_nstates2); visstate[0] = (uchar *) emalloc(_nstates0); visstate[1] = (uchar *) emalloc(_nstates1); visstate[2] = (uchar *) emalloc(_nstates2); mapstate[0] = (short *) emalloc(_nstates0 * sizeof(short)); mapstate[1] = (short *) emalloc(_nstates1 * sizeof(short)); mapstate[2] = (short *) emalloc(_nstates2 * sizeof(short)); stopstate[0][_endstate0] = 1; stopstate[1][_endstate1] = 1; stopstate[2][_endstate2] = 1; #ifdef HAS_CODE NrStates[0] = _nstates0; NrStates[1] = _nstates1; NrStates[2] = _nstates2; #endif Maxbody = max(Maxbody, sizeof(State)-VECTORSZ); if ((Maxbody % WS) != 0) Maxbody += WS - (Maxbody % WS); retrans(0, _nstates0, _start0, src_ln0, reached0, loopstate0); retrans(1, _nstates1, _start1, src_ln1, reached1, loopstate1); if (state_tables) { if (dodot) exit(0); printf("\nTransition Type: "); printf("A=atomic; D=d_step; L=local; G=global\n"); printf("Source-State Labels: "); printf("p=progress; e=end; a=accept;\n"); #ifdef MERGED printf("Note: statement merging was used. 
Only the first\n"); printf(" stmnt executed in each merge sequence is shown\n"); printf(" (use spin -a -o3 to disable statement merging)\n"); #endif pan_exit(0); } #if defined(BFS) && defined(TRIX) { int i; for (i = 0; i < MAXPROC+1; i++) { processes[i] = (TRIX_v6 *) emalloc(sizeof(TRIX_v6)); processes[i]->body = (uchar *) emalloc(Maxbody * sizeof(char)); } for (i = 0; i < MAXQ+1; i++) { channels[i] = (TRIX_v6 *) emalloc(sizeof(TRIX_v6)); channels[i]->body = (uchar *) emalloc(Maxbody * sizeof(char)); } } #endif #ifdef BFS_PAR bfs_setup_mem(); #ifdef COLLAPSE /* this must be the very first allocation from the shared heap */ #ifdef BFS_SEP_HASH ncomps = (ulong *) emalloc((ulong)((256+2) * sizeof(ulong))); #else ncomps = (ulong *) sh_pre_malloc((ulong)((256+2) * sizeof(ulong))); #endif #endif #endif iniglobals(258); /* arg outside range of pids */ #if defined(VERI) && !defined(NOREDUCE) && !defined(NP) && !defined(BFS) && !defined(HAS_LTL) if (!state_tables #ifdef HAS_CODE && !readtrail #endif #if NCORE>1 && core_id == 0 #endif ) { printf("warning: for p.o. 
reduction to be valid "); printf("the never claim must be stutter-invariant\n"); printf("(never claims generated from LTL "); printf("formulae are stutter-invariant)\n"); } #endif UnBlock; /* disable rendez-vous */ #ifdef BITSTATE sinit(); #else hinit(); #endif #if defined(FULLSTACK) && defined(BITSTATE) onstack_init(); #endif #if defined(CNTRSTACK) && !defined(BFS) LL = (uchar *) emalloc(ONE_L<<(ssize-3)); #endif stack = (_Stack *) emalloc(sizeof(_Stack)); svtack = (Svtack *) emalloc(sizeof(Svtack)); /* a place to point for Pptr of non-running procs: */ noqptr = noptr = (uchar *) emalloc(Maxbody * sizeof(char)); #if defined(SVDUMP) && defined(VERBOSE) if (vprefix > 0) (void) write(svfd, (uchar *) &vprefix, sizeof(int)); #endif #ifdef VERI Addproc(VERI,1); /* pid = 0, priority 1 */ #if NCLAIMS>1 if (claimname != NULL) { whichclaim = find_claim(claimname); select_claim(whichclaim); } #endif #endif active_procs(); /* started after never */ #ifdef EVENT_TRACE now._event = start_event; reached[EVENT_TRACE][start_event] = 1; #endif #ifdef HAS_CODE globinit(); #endif #ifdef BITSTATE go_again: #endif do_the_search(); #ifdef BITSTATE if (--Nrun > 0 && HASH_CONST[++HASH_NR]) { printf("Run %d:\n", HASH_NR); wrap_stats(); printf("\n"); if (udmem) /* Dillinger 3/2/09 */ { memset(SS, 0, udmem); } else { memset(SS, 0, ONE_L<<(ssize-3)); } #ifdef CNTRSTACK memset(LL, 0, ONE_L<<(ssize-3)); #endif #ifdef FULLSTACK memset((uchar *) S_Tab, 0, maxdepth*sizeof(H_el *)); #endif nstates=nlinks=truncs=truncs2=ngrabs = 0; nlost=nShadow=hcmp = 0; Fa=Fh=Zh=Zn = 0; PUT=PROBE=ZAPS=Ccheck=Cholds = 0; goto go_again; } #endif } #ifdef HAS_PRIORITY extern int highest_priority(int, short, Trans *); extern int get_priority(int); extern int set_priority(int, int); #endif #ifdef SPIN_HEAP void * spin_malloc(int n) /* reserved for use by Modex generated models */ { char *spin_heap_ptr = &(now.spin_heap[now.spin_heap_n]); if (now.spin_heap_n + n >= sizeof(now.spin_heap)) { Uerror("spin_heap limit 
reached"); } now.spin_heap_n += n; return spin_heap_ptr; } void spin_free(void *unused) { unused; /* ignore */ } #endif int spin_join(int p, void **unused) { /* fprintf(stderr, "join %d when %d\n ", p, now._nr_pr); */ return (now._nr_pr <= p); /* process *p has stopped */ } int spin_mutex_free(int *m) { return (*m == 0); } int spin_mutex_lock(int *m) { *m = 1; return 1; } void spin_mutex_destroy(int *m) { *m = 0; } void spin_mutex_unlock(int *m) { *m = 0; } void spin_mutex_init(int *m, void *val) { if (!val) { *m = 0; } else { Uerror("pthread_mutex_init: unsupported non-default init"); } } int spin_cond_wait(int *cond, int *lck) { /* this version does not scale very far alas */ if (((P0 *)_this)->_pid + 1 >= WS*8) { Uerror("pid exceeds range supported by pthread_cond_wait"); } if (((*cond)&1) == 0) { spin_mutex_unlock(lck); *cond |= (1<<(((P0 *)_this)->_pid + 1)); return 0; } else { /* if other processes are already waiting */ /* while our wait flag is 0, then they should go first */ if (((*cond)&(~(1 | (1<<(((P0 *)_this)->_pid + 1))))) != 0) { spin_mutex_unlock(lck); return 0; } *cond &= ~1; *cond &= ~(1<<(((P0 *)_this)->_pid + 1)); return 1; } } void spin_cond_signal(int *cond) { if ( ((*cond)&(~1)) != 0 ) { *cond |= 1; } } #ifdef HAS_PROVIDED int provided(int, uchar, int, Trans *); #endif #ifdef BFS_PAR extern void bfs_shutdown(const char *); #endif #if NCORE>1 #define GLOBAL_LOCK (0) #ifndef CS_N #define CS_N (256*NCORE) #endif #ifdef NGQ #define NR_QS (NCORE) #define CS_NR (CS_N+1) /* 2^N + 1, nr critical sections */ #define GQ_RD GLOBAL_LOCK #define GQ_WR GLOBAL_LOCK #define CS_ID (1 + (int) (j1_spin & (CS_N-1))) /* mask: 2^N - 1, zero reserved */ #define QLOCK(n) (1+n) #else #define NR_QS (NCORE+1) #define CS_NR (CS_N+3) #define GQ_RD (1) #define GQ_WR (2) #define CS_ID (3 + (int) (j1_spin & (CS_N-1))) #define QLOCK(n) (3+n) #endif #ifndef SEP_STATE #define enter_critical(w) e_critical(w) #define leave_critical(w) x_critical(w) #else #ifdef NGQ #define 
enter_critical(w) { if (w < 1+NCORE) e_critical(w); } #define leave_critical(w) { if (w < 1+NCORE) x_critical(w); } #else #define enter_critical(w) { if (w < 3+NCORE) e_critical(w); } #define leave_critical(w) { if (w < 3+NCORE) x_critical(w); } #endif #endif int cpu_printf(const char *fmt, ...) { va_list args; enter_critical(GLOBAL_LOCK); /* printing */ printf("cpu%d: ", core_id); fflush(stdout); va_start(args, fmt); vprintf(fmt, args); va_end(args); fflush(stdout); leave_critical(GLOBAL_LOCK); return 1; } #else #define enter_critical(w) /* none */ #define leave_critical(w) /* none */ int cpu_printf(const char *fmt, ...) { va_list args; va_start(args, fmt); vprintf(fmt, args); va_end(args); return 1; } #endif int Printf(const char *fmt, ...) { /* Make sure the args to Printf * are always evaluated (e.g., they * could contain a run stmnt) * but do not generate the output * during verification runs * unless explicitly wanted * If this fails on your system * compile SPIN itself -DPRINTF * and this code is not generated */ #ifdef HAS_CODE if (readtrail) { va_list args; va_start(args, fmt); vprintf(fmt, args); va_end(args); return 1; } #endif #ifdef PRINTF va_list args; va_start(args, fmt); vprintf(fmt, args); va_end(args); #endif return 1; } extern void printm(int, char *); #ifndef SC #define getframe(i) &trail[i]; #else static long HHH, DDD, hiwater; static long CNT1, CNT2; static int stackwrite; static int stackread; static Trail frameptr; Trail * getframe(long d) { if (CNT1 == CNT2) return &trail[d]; if (d >= (CNT1-CNT2)*DDD) return &trail[d - (CNT1-CNT2)*DDD]; if (!stackread && (stackread = open(stackfile, 0)) < 0) { printf("getframe: cannot open %s\n", stackfile); wrapup(); } if (lseek(stackread, d* (off_t) sizeof(Trail), SEEK_SET) == -1 || read(stackread, &frameptr, sizeof(Trail)) != sizeof(Trail)) { printf("getframe: frame read error\n"); wrapup(); } return &frameptr; } #endif #if NCORE>1 extern void cleanup_shm(int); volatile uint *search_terminated; /* to 
signal early termination */ #endif void pan_exit(int val) { void stop_timer(int); #ifdef BFS_PAR extern void bfs_mark_done(int); extern void bfs_drop_shared_memory(void); #endif if (signoff) { printf("--end of output--\n"); } #if NCORE>1 if (search_terminated != NULL) { *search_terminated |= 1; /* pan_exit */ } #ifdef USE_DISK { void dsk_stats(void); dsk_stats(); } #endif if (!state_tables && !readtrail) { cleanup_shm(1); } #endif #ifdef BFS_PAR if (who_am_i != 0) { bfs_mark_done(3); /* stopped */ } bfs_drop_shared_memory(); #endif if (val == 2) { val = 0; } #ifdef BFS_PAR if (who_am_i == 0) #endif stop_timer(1); #ifdef C_EXIT C_EXIT; /* trust that it defines a fct */ #endif exit(val); } #ifdef HAS_CODE static char tbuf[2][2048]; char * transmognify(char *s) { char *v, *w; int i, toggle = 0; if (!s || strlen(s) > 2047) return s; memset(tbuf[0], 0, 2048); memset(tbuf[1], 0, 2048); strcpy(tbuf[toggle], s); while ((v = strstr(tbuf[toggle], "{c_code"))) { *v = '\0'; v++; strcpy(tbuf[1-toggle], tbuf[toggle]); for (w = v; *w != '}' && *w != '\0'; w++) /* skip */; if (*w != '}') return s; *w = '\0'; w++; for (i = 0; code_lookup[i].c; i++) if (strcmp(v, code_lookup[i].c) == 0 && strlen(v) == strlen(code_lookup[i].c)) { if (strlen(tbuf[1-toggle]) + strlen(code_lookup[i].t) + strlen(w) > 2047) return s; strcat(tbuf[1-toggle], code_lookup[i].t); break; } strcat(tbuf[1-toggle], w); toggle = 1 - toggle; } tbuf[toggle][2047] = '\0'; return tbuf[toggle]; } #else char * transmognify(char *s) { return s; } #endif char o_cmdline[512]; char o_cmdname[128]; #ifdef HAS_CODE void add_src_txt(int ot, int tt) { Trans *t; char *q; for (t = trans[ot][tt]; t; t = t->nxt) { printf("\t\t"); q = transmognify(t->tp); for ( ; q && *q; q++) if (*q == '\n') printf("\\n"); else putchar(*q); printf("\n"); } } char * find_source(int tp, int s) { if (s >= flref[tp]->from && s <= flref[tp]->upto) { return flref[tp]->fnm; } return PanSource; /* i.e., don't know */ } void wrap_trail(void) { static int 
wrap_in_progress = 0; int i; short II; P0 *z; if (wrap_in_progress++) return; printf("spin: trail ends after %ld steps\n", depth); if (onlyproc >= 0) { if (onlyproc >= now._nr_pr) { pan_exit(0); } II = onlyproc; z = (P0 *)pptr(II); printf("%3ld: proc %d (%s) ", depth, II, procname[z->_t]); for (i = 0; src_all[i].src; i++) if (src_all[i].tp == (int) z->_t) { printf(" %s:%d", find_source((int) z->_t, (int) z->_p), src_all[i].src[z->_p]); break; } printf(" (state %2d)", z->_p); if (!stopstate[z->_t][z->_p]) printf(" (invalid end state)"); printf("\n"); add_src_txt(z->_t, z->_p); pan_exit(0); } printf("#processes %d:\n", now._nr_pr); if (depth < 0) depth = 0; for (II = 0; II < now._nr_pr; II++) { z = (P0 *)pptr(II); printf("%3ld: proc %d (%s) ", depth, II, procname[z->_t]); for (i = 0; src_all[i].src; i++) if (src_all[i].tp == (int) z->_t) { printf(" %s:%d", find_source((int) z->_t, (int) z->_p), src_all[i].src[z->_p]); break; } printf(" (state %2d)", z->_p); if (!stopstate[z->_t][z->_p]) printf(" (invalid end state)"); printf("\n"); add_src_txt(z->_t, z->_p); } c_globals(); for (II = 0; II < now._nr_pr; II++) { z = (P0 *)pptr(II); c_locals(II, z->_t); } #ifdef ON_EXIT ON_EXIT; #endif pan_exit(0); } FILE * findtrail(void) { FILE *fd; char fnm[512], *q; char MyFile[512]; char MySuffix[16]; int try_core; int candidate_files; if (trailfilename != NULL) { fd = fopen(trailfilename, "r"); if (fd == NULL) { printf("pan: cannot find %s\n", trailfilename); pan_exit(1); } /* else */ goto success; } talk: try_core = 1; candidate_files = 0; tprefix = "trail"; strcpy(MyFile, TrailFile); do { /* see if there's more than one possible trailfile */ if (whichtrail) { sprintf(fnm, "%s%d.%s", MyFile, whichtrail, tprefix); fd = fopen(fnm, "r"); if (fd != NULL) { candidate_files++; if (verbose==100) printf("trail%d: %s\n", candidate_files, fnm); fclose(fd); } if ((q = strchr(MyFile, '.')) != NULL) { *q = '\0'; sprintf(fnm, "%s%d.%s", MyFile, whichtrail, tprefix); *q = '.'; fd = fopen(fnm, 
"r"); if (fd != NULL) { candidate_files++; if (verbose==100) printf("trail%d: %s\n", candidate_files, fnm); fclose(fd); } } } else { sprintf(fnm, "%s.%s", MyFile, tprefix); fd = fopen(fnm, "r"); if (fd != NULL) { candidate_files++; if (verbose==100) printf("trail%d: %s\n", candidate_files, fnm); fclose(fd); } if ((q = strchr(MyFile, '.')) != NULL) { *q = '\0'; sprintf(fnm, "%s.%s", MyFile, tprefix); *q = '.'; fd = fopen(fnm, "r"); if (fd != NULL) { candidate_files++; if (verbose==100) printf("trail%d: %s\n", candidate_files, fnm); fclose(fd); } } } tprefix = MySuffix; sprintf(tprefix, "cpu%d_trail", try_core++); } while (try_core <= NCORE); if (candidate_files != 1) { if (verbose != 100) { printf("error: there are %d trail files:\n", candidate_files); verbose = 100; goto talk; } else { printf("pan: rm or mv all except one\n"); exit(1); } } try_core = 1; strcpy(MyFile, TrailFile); /* restore */ tprefix = "trail"; try_again: if (whichtrail) { sprintf(fnm, "%s%d.%s", MyFile, whichtrail, tprefix); fd = fopen(fnm, "r"); if (fd == NULL && (q = strchr(MyFile, '.'))) { *q = '\0'; sprintf(fnm, "%s%d.%s", MyFile, whichtrail, tprefix); *q = '.'; fd = fopen(fnm, "r"); } } else { sprintf(fnm, "%s.%s", MyFile, tprefix); fd = fopen(fnm, "r"); if (fd == NULL && (q = strchr(MyFile, '.'))) { *q = '\0'; sprintf(fnm, "%s.%s", MyFile, tprefix); *q = '.'; fd = fopen(fnm, "r"); } } if (fd == NULL) { if (try_core < NCORE) { tprefix = MySuffix; sprintf(tprefix, "cpu%d_trail", try_core++); goto try_again; } printf("pan: cannot find trailfile %s\n", fnm); pan_exit(1); } success: #if NCORE>1 && defined(SEP_STATE) { void set_root(void); /* for partial traces from local root */ set_root(); } #endif return fd; } uchar do_transit(Trans *, short); #ifdef PERMUTED void set_permuted(int); void set_reversed(int); void set_rotated(int); void set_randrot(int); void (*p_reorder)(int) = set_permuted; short p_rotate; #endif void getrail(void) { FILE *fd; char *q, *pnm; int i, t_id, lastnever = -1; short 
II; Trans *t; P0 *z; #ifdef PERMUTED char sbuf[128]; memset(sbuf, 0, sizeof(sbuf)); #endif fd = findtrail(); /* exits if unsuccessful */ while (fscanf(fd, "%ld:%d:%d\n", &depth, &i, &t_id) == 3) { if (depth == -1) { printf("<<<<>>>>\n"); } #ifdef PERMUTED if (depth < 0) { switch (depth) { case -5: if (i && !t_reverse) { strcat(sbuf, "-t_reverse "); } break; case -6: if (i && p_reorder != set_permuted) { strcat(sbuf, "-p_permute "); } else if (t_id && p_reorder != set_reversed) { strcat(sbuf, "-p_reverse "); } break; case -7: if (i && (p_reorder != set_rotated || p_rotate != t_id)) { char tmp[32]; sprintf(tmp, "-p_rotate%d ", t_id); strcat(sbuf, tmp); } break; case -8: if (i && p_reorder != set_randrot) { strcat(sbuf, "-p_randrot "); } if (s_rand != t_id) { char tmp[32]; sprintf(tmp, "-RS%u ", (uint) t_id); strcat(sbuf, tmp); } break; default: continue; } } #endif if (depth < 0) { continue; } #ifdef PERMUTED if (strlen(sbuf) > 0) { char *restart = emalloc( strlen(o_cmdname) + 1 + strlen(sbuf) + 1 + strlen(o_cmdline) + 1); sprintf(restart, "%s %s %s", o_cmdname, o_cmdline, sbuf); if (system(restart) < 0) { fprintf(efd, "command failed: %s\n", restart); } exit(1); } #endif if (i > now._nr_pr) { printf("pan: Error, proc %d invalid pid ", i); printf("transition %d\n", t_id); break; } II = i; z = (P0 *)pptr(II); for (t = trans[z->_t][z->_p]; t; t = t->nxt) if (t->t_id == (T_ID) t_id) break; if (!t) { for (i = 0; i < NrStates[z->_t]; i++) { t = trans[z->_t][i]; if (t && t->t_id == (T_ID) t_id) { printf("\tRecovered at state %d\n", i); z->_p = i; goto recovered; } } printf("pan: Error, proc %d type %d state %d: ", II, z->_t, z->_p); printf("transition %d not found\n", t_id); printf("pan: list of possible transitions in this process:\n"); if (z->_t >= 0 && z->_t <= _NP_) for (t = trans[z->_t][z->_p]; t; t = t->nxt) printf(" t_id %d -- case %d, [%s]\n", t->t_id, t->forw, t->tp); break; /* pan_exit(1); */ } recovered: q = transmognify(t->tp); if (gui) simvals[0] = '\0'; pnm = 
procname[z->_t]; _this = pptr(II); trpt->tau |= 1; if (!do_transit(t, II)) { if (onlyproc >= 0 && II != onlyproc) goto moveon; if (!verbose) break; printf("pan: error, next transition UNEXECUTABLE on replay\n"); printf(" most likely causes: missing c_track statements\n"); printf(" or illegal side-effects in c_expr statements\n"); } if (onlyproc >= 0 && II != onlyproc) goto moveon; if (verbose) { printf("%3ld: proc %2d (%s) ", depth, II, pnm); for (i = 0; src_all[i].src; i++) if (src_all[i].tp == (int) z->_t) { printf(" %s:%d ", find_source((int) z->_t, (int) z->_p), src_all[i].src[z->_p]); break; } printf("(state %d) trans {%d,%d} [%s]\n", z->_p, t_id, t->forw, q?q:""); c_globals(); for (i = 0; i < now._nr_pr; i++) { c_locals(i, ((P0 *)pptr(i))->_t); } } else if (Btypes[z->_t] == N_CLAIM) { if (lastnever != (int) z->_p) { for (i = 0; src_all[i].src; i++) if (src_all[i].tp == (int) z->_t) { printf("MSC: ~G %d\n", src_all[i].src[z->_p]); break; } if (!src_all[i].src) printf("MSC: ~R %d\n", z->_p); } lastnever = z->_p; goto sameas; } else if (Btypes[z->_t] != 0) /* not :np_: */ { sameas: if (no_rck) goto moveon; if (coltrace) { printf("%ld: ", depth); for (i = 0; i < II; i++) printf("\t\t"); printf("%s(%d):", pnm, II); printf("[%s]\n", q?q:""); } else if (!silent) { #ifdef MYSTEP #include "mystep.c" #endif if (strlen(simvals) > 0) { printf("%3ld: proc %2d (%s)", depth, II, pnm); for (i = 0; src_all[i].src; i++) if (src_all[i].tp == (int) z->_t) { printf(" %s:%d ", find_source((int) z->_t, (int) z->_p), src_all[i].src[z->_p]); break; } printf("(state %d) [values: %s]\n", z->_p, simvals); } printf("%3ld: proc %2d (%s)", depth, II, pnm); for (i = 0; src_all[i].src; i++) if (src_all[i].tp == (int) z->_t) { printf(" %s:%d ", find_source((int) z->_t, (int) z->_p), src_all[i].src[z->_p]); break; } printf("(state %d) [%s]\n", z->_p, q?q:""); /* printf("\n"); */ } } moveon: z->_p = t->st; } wrap_trail(); } #endif int f_pid(int pt) { int i; P0 *z; for (i = 0; i < now._nr_pr; 
i++) { z = (P0 *)pptr(i); if (z->_t == (unsigned) pt) return BASE+z->_pid; } return -1; } #if NCORE>1 && !defined(GLOB_HEAP) #define SEP_HEAP /* version 5.1.2 */ #endif #ifdef BITSTATE int bstore_mod(char *v, int n) /* hasharray size not a power of two */ { ulong x, y; uint i = 1; #if defined(MURMUR) && (WS==8) m_hash((uchar *) v, n); /* bstore_mod - sets j3_spin, j4_spin, K1, K2 */ #else d_hash((uchar *) v, n); /* bstore_mod - sets j3_spin, j4_spin, K1, K2 */ #endif x = K1; y = j3_spin; for (;;) { if (!(SS[x%udmem]&(1< RANDSTOR) return 0; #endif for (;;) { SS[x%udmem] |= (1< RANDSTOR) return 0; #endif for (;;) { SS[x] |= (1< 0) { #ifdef PUTPID sprintf(fnm, "%s_%s_%d_%d.%s", MyFile, progname, getpid(), Nr_Trails-1, tprefix); #else sprintf(fnm, "%s%d.%s", MyFile, Nr_Trails-1, tprefix); #endif } else { #ifdef PUTPID sprintf(fnm, "%s_%s_%d.%s", MyFile, progname, getpid(), tprefix); #else sprintf(fnm, "%s.%s", MyFile, tprefix); #endif } if ((fd = open(fnm, w_flags, TMODE)) < 0) { if ((q = strchr(MyFile, '.'))) { *q = '\0'; if (iterative == 0 && Nr_Trails-1 > 0) sprintf(fnm, "%s%d.%s", MyFile, Nr_Trails-1, tprefix); else sprintf(fnm, "%s.%s", MyFile, tprefix); *q = '.'; fd = open(fnm, w_flags, TMODE); } } if (fd < 0) { printf("pan: cannot create %s\n", fnm); perror("cause"); } else { #if NCORE>1 && (defined(SEP_STATE) || !defined(FULL_TRAIL)) void write_root(void); write_root(); #else printf("pan: wrote %s\n", fnm); #endif } return fd; } #ifndef FREQ #define FREQ (1000000) #endif double freq = (double) FREQ; #ifdef TRIX void sv_populate(void); void re_populate(void) /* restore procs and chans from now._ids_ */ { int i, cnt = 0; char *b; #ifdef V_TRIX printf("%4d: re_populate\n", depth); #endif for (i = 0; i < now._nr_pr; i++, cnt++) { b = now._ids_[cnt]; processes[i]->psize = what_p_size( ((P0 *)b)->_t ); memcpy(processes[i]->body, b, processes[i]->psize); #ifdef TRIX_RIX ((P0 *)pptr(i))->_pid = i; if (BASE > 0 && h > 0) { ((P0 *)pptr(i))->_pid -= BASE; } #endif #ifndef 
BFS processes[i]->modified = 1; /* re-populate */ #endif } for (i = 0; i < now._nr_qs; i++, cnt++) { b = now._ids_[cnt]; channels[i]->psize = what_q_size( ((Q0 *)b)->_t ); memcpy(channels[i]->body, b, channels[i]->psize); #ifndef BFS channels[i]->modified = 1; /* re-populate */ #endif } } #endif #ifdef BFS #ifndef BFS_PAR BFS_State *bfs_trail, *bfs_bot, *bfs_free; SV_Hold *svfree; #else static ulong bfs_pre_allocated; #endif #ifdef BFS_DISK #ifndef BFS_LIMIT #define BFS_LIMIT 100000 #endif #ifndef BFS_DSK_LIMIT #define BFS_DSK_LIMIT 1000000 #endif #if defined(WIN32) || defined(WIN64) #define RFLAGS (O_RDONLY|O_BINARY) #define WFLAGS (O_CREAT|O_WRONLY|O_TRUNC|O_BINARY) #define RWFLAGS (O_RDWR|O_BINARY) #else #define RFLAGS (O_RDONLY) #define WFLAGS (O_CREAT|O_WRONLY|O_TRUNC) #define RWFLAGS (O_RDWR) #endif long bfs_size_limit; int bfs_dsk_write = -1; int bfs_dsk_read = -1; long bfs_dsk_writes, bfs_dsk_reads; int bfs_dsk_seqno_w, bfs_dsk_seqno_r; #endif uchar do_reverse(Trans *, short, uchar); void snapshot(void); #if 0 void select_claim(int x) /* ignored in BFS mode */ { if (verbose) { printf("select %d (ignored)\n", x); } } #endif Trail *ntrpt; #ifndef BFS_PAR SV_Hold * getsv(int n) { SV_Hold *h; if (svfree && n <= svfree->sz) { h = svfree; svfree = h->nxt; h->nxt = (SV_Hold *) 0; } else { h = (SV_Hold *) emalloc(sizeof(SV_Hold)); h->sz = n; #ifdef BFS_DISK if (bfs_size_limit >= BFS_LIMIT) { h->sv = (State *) 0; /* means: read disk */ bfs_dsk_writes++; /* count */ if (bfs_dsk_write < 0 /* file descriptor */ || bfs_dsk_writes%BFS_DSK_LIMIT == 0) { char dsk_nm[32]; if (bfs_dsk_write >= 0) { (void) close(bfs_dsk_write); } sprintf(dsk_nm, "pan_bfs_%d.tmp", bfs_dsk_seqno_w++); bfs_dsk_write = open(dsk_nm, WFLAGS, 0644); if (bfs_dsk_write < 0) { Uerror("could not create tmp disk file"); } printf("pan: created disk file %s\n", dsk_nm); } if (write(bfs_dsk_write, (char *) &now, n) != n) { Uerror("aborting -- disk write failed (disk full?)"); } return h; /* no memcpy */ } 
bfs_size_limit++; #endif h->sv = (State *) emalloc(sizeof(State) - VECTORSZ + n); } memcpy((char *)h->sv, (char *)&now, n); return h; } EV_Hold * getsv_mask(int n) { EV_Hold *h; static EV_Hold *kept = (EV_Hold *) 0; for (h = kept; h; h = h->nxt) if (n == h->sz #if !defined(NOCOMP) && !defined(HC) && (memcmp((char *) Mask, (char *) h->sv, n) == 0) #endif && (now._nr_pr == h->nrpr) && (now._nr_qs == h->nrqs) #ifdef TRIX ) #else #if VECTORSZ>32000 && (memcmp((char *) proc_offset, (char *) h->po, now._nr_pr * sizeof(int)) == 0) && (memcmp((char *) q_offset, (char *) h->qo, now._nr_qs * sizeof(int)) == 0) #else && (memcmp((char *) proc_offset, (char *) h->po, now._nr_pr * sizeof(short)) == 0) && (memcmp((char *) q_offset, (char *) h->qo, now._nr_qs * sizeof(short)) == 0) #endif && (memcmp((char *) proc_skip, (char *) h->ps, now._nr_pr * sizeof(uchar)) == 0) && (memcmp((char *) q_skip, (char *) h->qs, now._nr_qs * sizeof(uchar)) == 0)) #endif break; if (!h) { h = (EV_Hold *) emalloc(sizeof(EV_Hold)); h->sz = n; h->nrpr = now._nr_pr; h->nrqs = now._nr_qs; h->sv = (char *) emalloc(n * sizeof(char)); #if !defined(NOCOMP) && !defined(HC) memcpy((char *) h->sv, (char *) Mask, n); #endif #ifndef TRIX if (now._nr_pr > 0) { h->ps = (char *) emalloc(now._nr_pr * sizeof(int)); memcpy((char *) h->ps, (char *) proc_skip, now._nr_pr * sizeof(uchar)); #if VECTORSZ>32000 h->po = (char *) emalloc(now._nr_pr * sizeof(int)); memcpy((char *) h->po, (char *) proc_offset, now._nr_pr * sizeof(int)); #else h->po = (char *) emalloc(now._nr_pr * sizeof(short)); memcpy((char *) h->po, (char *) proc_offset, now._nr_pr * sizeof(short)); #endif } if (now._nr_qs > 0) { h->qs = (char *) emalloc(now._nr_qs * sizeof(int)); memcpy((char *) h->qs, (char *) q_skip, now._nr_qs * sizeof(uchar)); #if VECTORSZ>32000 h->qo = (char *) emalloc(now._nr_qs * sizeof(int)); memcpy((char *) h->qo, (char *) q_offset, now._nr_qs * sizeof(int)); #else h->qo = (char *) emalloc(now._nr_qs * sizeof(short)); memcpy((char *) 
h->qo, (char *) q_offset, now._nr_qs * sizeof(short)); #endif } #endif h->nxt = kept; kept = h; } return h; } void freesv(SV_Hold *p) { SV_Hold *h, *oh; oh = (SV_Hold *) 0; for (h = svfree; h; oh = h, h = h->nxt) { if (p->sz >= h->sz) break; } if (!oh) { p->nxt = svfree; svfree = p; } else { p->nxt = h; oh->nxt = p; } } BFS_State * get_bfs_frame(void) { BFS_State *t; if (bfs_free) { t = bfs_free; bfs_free = bfs_free->nxt; t->nxt = (BFS_State *) 0; } else { t = (BFS_State *) emalloc(sizeof(BFS_State)); } t->frame = (Trail *) emalloc(sizeof(Trail)); /* new because we keep a ptr to the frame of parent states */ /* used for reconstructing path and recovering failed rvs etc */ return t; } void push_bfs(Trail *f, int d) { BFS_State *t; t = get_bfs_frame(); memcpy((char *)t->frame, (char *)f, sizeof(Trail)); t->frame->o_tt = d; /* depth */ t->boq = boq; #ifdef TRIX sv_populate(); #endif t->onow = getsv(vsize); t->omask = getsv_mask(vsize); #if defined(FULLSTACK) && defined(Q_PROVISO) t->lstate = Lstate; /* bfs */ #endif if (!bfs_bot) { bfs_bot = bfs_trail = t; } else { bfs_bot->nxt = t; bfs_bot = t; } #ifdef VERBOSE t->nr = nstates; #endif #ifdef CHECK #ifdef VERBOSE printf("PUSH %lu (depth %d, nr %lu)\n", (ulong) t->frame, d, t->nr); #else printf("PUSH %lu (depth %d)\n", (ulong) t->frame, d); #endif #endif } Trail * pop_bfs(void) { BFS_State *t; if (!bfs_trail) { return (Trail *) 0; } t = bfs_trail; bfs_trail = t->nxt; if (!bfs_trail) { bfs_bot = (BFS_State *) 0; } #if defined(Q_PROVISO) && !defined(BITSTATE) && !defined(NOREDUCE) if (t->lstate) /* bfs */ { t->lstate->tagged = 0; /* bfs */ } #endif t->nxt = bfs_free; bfs_free = t; vsize = t->onow->sz; boq = t->boq; #ifdef BFS_DISK if (t->onow->sv == (State *) 0) { char dsk_nm[32]; bfs_dsk_reads++; /* count */ if (bfs_dsk_read >= 0 /* file descriptor */ && bfs_dsk_reads%BFS_DSK_LIMIT == 0) { (void) close(bfs_dsk_read); sprintf(dsk_nm, "pan_bfs_%d.tmp", bfs_dsk_seqno_r-1); (void) unlink(dsk_nm); bfs_dsk_read = -1; } if 
(bfs_dsk_read < 0) { sprintf(dsk_nm, "pan_bfs_%d.tmp", bfs_dsk_seqno_r++); bfs_dsk_read = open(dsk_nm, RFLAGS); if (bfs_dsk_read < 0) { Uerror("could not open temp disk file"); } } if (read(bfs_dsk_read, (char *) &now, vsize) != vsize) { Uerror("bad bfs disk file read"); } #ifndef NOVSZ if (now._vsz != vsize) { Uerror("disk read vsz mismatch"); } #endif } else #endif { memcpy((uchar *) &now, (uchar *) t->onow->sv, vsize); #ifndef NOVSZ vsize = now._vsz; #endif } #if !defined(NOCOMP) && !defined(HC) memcpy((uchar *) Mask, (uchar *) t->omask->sv, vsize); #endif #ifdef TRIX re_populate(); #else if (now._nr_pr > 0) #if VECTORSZ>32000 { memcpy((char *)proc_offset, (char *)t->omask->po, now._nr_pr * sizeof(int)); #else { memcpy((char *)proc_offset, (char *)t->omask->po, now._nr_pr * sizeof(short)); #endif memcpy((char *)proc_skip, (char *)t->omask->ps, now._nr_pr * sizeof(uchar)); } if (now._nr_qs > 0) #if VECTORSZ>32000 { memcpy((uchar *)q_offset, (uchar *)t->omask->qo, now._nr_qs * sizeof(int)); #else { memcpy((uchar *)q_offset, (uchar *)t->omask->qo, now._nr_qs * sizeof(short)); #endif memcpy((uchar *)q_skip, (uchar *)t->omask->qs, now._nr_qs * sizeof(uchar)); } #endif #ifdef BFS_DISK if (t->onow->sv != (State *) 0) #endif { freesv(t->onow); /* omask not freed */ } #ifdef CHECK #ifdef VERBOSE printf("POP %lu (depth %d, nr %lu)\n", (ulong) t->frame, t->frame->o_tt, t->nr); #else printf("POP %lu (depth %d)\n", (ulong) t->frame, t->frame->o_tt); #endif #endif return t->frame; } void store_state(Trail *ntrpt, int shortcut, short oboq) { #ifdef VERI Trans *t2 = (Trans *) 0; uchar ot; int tt, E_state; uchar o_opm = trpt->o_pm, *othis = _this; if (shortcut) { #ifdef VERBOSE printf("claim: shortcut\n"); #endif goto store_it; /* no claim move */ } _this = pptr(0); /* 0 = never claim */ trpt->o_pm = 0; tt = (int) ((P0 *)_this)->_p; ot = (uchar) ((P0 *)_this)->_t; #ifdef HAS_UNLESS E_state = 0; #endif for (t2 = trans[ot][tt]; t2; t2 = t2?t2->nxt:(Trans *)0) { #ifdef HAS_UNLESS 
if (E_state > 0 && E_state != t2->e_trans) { break; } #endif if (do_transit(t2, 0)) { #ifdef VERBOSE if (!reached[ot][t2->st]) printf("depth: %d -- claim move from %d -> %d\n", trpt->o_tt, ((P0 *)_this)->_p, t2->st); #endif #ifdef HAS_UNLESS E_state = t2->e_trans; #endif if (t2->st > 0) { ((P0 *)_this)->_p = t2->st; reached[ot][t2->st] = 1; #ifndef NOCLAIM if (stopstate[ot][t2->st]) { uerror("end state in claim reached"); } #endif } if (now._nr_pr == 0) /* claim terminated */ uerror("end state in claim reached"); #ifdef PEG peg[t2->forw]++; #endif trpt->o_pm |= 1; if (t2->atom&2) { Uerror("atomic in claim not supported in BFS"); } store_it: #endif #if defined(BITSTATE) if (!b_store((char *)&now, vsize)) #elif defined(MA) if (!g_store((char *)&now, vsize, 0)) #else if (!h_store((char *)&now, vsize)) #endif { static long sdone = (long) 0; long ndone; nstates++; #ifndef NOREDUCE trpt->tau |= 64; #endif ndone = (ulong) (nstates/(freq)); if (ndone != sdone && mreached%10 != 0) { snapshot(); sdone = ndone; #if defined(AUTO_RESIZE) && !defined(BITSTATE) && !defined(MA) if (nstates > ((double)(1<<(ssize+1)))) { void resize_hashtable(void); resize_hashtable(); } #endif } #if SYNC if (boq != -1) midrv++; else if (oboq != -1) { Trail *x; x = (Trail *) trpt->ostate; /* pre-rv state */ if (x) x->o_pm |= 4; /* mark success */ } #endif push_bfs(ntrpt, trpt->o_tt+1); } else { truncs++; #if defined(Q_PROVISO) && !defined(NOREDUCE) && defined(FULLSTACK) #if !defined(BITSTATE) if (Lstate && Lstate->tagged) { trpt->tau |= 64; } #else if (trpt->tau&32) { BFS_State *tprov; for (tprov = bfs_trail; tprov; tprov = tprov->nxt) if (tprov->onow->sv != (State *) 0 && memcmp((uchar *)&now, (uchar *)tprov->onow->sv, vsize) == 0) { trpt->tau |= 64; break; /* state is in queue */ } } #endif #endif } #ifdef VERI ((P0 *)_this)->_p = tt; /* reset claim */ if (t2) do_reverse(t2, 0, 0); else break; } } _this = othis; trpt->o_pm = o_opm; #endif } void bfs(void) { Trans *t; Trail *otrpt, *x; uchar _n, 
_m, ot, nps = 0; int tt, E_state; short II, From = (short) (now._nr_pr-1), To = BASE; short oboq = boq; ntrpt = (Trail *) emalloc(sizeof(Trail)); trpt->ostate = (H_el *) 0; trpt->tau = 0; trpt->o_tt = -1; store_state(ntrpt, 0, oboq); /* initial state */ while ((otrpt = pop_bfs())) /* also restores now */ { memcpy((char *) trpt, (char *) otrpt, sizeof(Trail)); #if defined(C_States) && (HAS_TRACK==1) c_revert((uchar *) &(now.c_state[0])); #endif if (trpt->o_pm & 4) { #ifdef VERBOSE printf("Revisit of atomic not needed (%d)\n", trpt->o_pm); #endif continue; } #ifndef NOREDUCE nps = 0; #endif if (trpt->o_pm == 8) { revrv++; if (trpt->tau&8) { #ifdef VERBOSE printf("Break atomic (pm:%d,tau:%d)\n", trpt->o_pm, trpt->tau); #endif trpt->tau &= ~8; } #ifndef NOREDUCE else if (trpt->tau&32) { #ifdef VERBOSE printf("Void preselection (pm:%d,tau:%d)\n", trpt->o_pm, trpt->tau); #endif trpt->tau &= ~32; nps = 1; /* no preselection in repeat */ } #endif } trpt->o_pm &= ~(4|8); if (trpt->o_tt > mreached) { mreached = trpt->o_tt; if (mreached%10 == 0) { snapshot(); } } depth = trpt->o_tt; if (depth >= maxdepth) { #if SYNC Trail *x; if (boq != -1) { x = (Trail *) trpt->ostate; if (x) x->o_pm |= 4; /* not failing */ } #endif truncs++; if (!warned) { warned = 1; printf("error: max search depth too small\n"); } if (bounded) { uerror("depth limit reached"); } continue; } #ifndef NOREDUCE if (boq == -1 && !(trpt->tau&8) && nps == 0) for (II = now._nr_pr-1; II >= BASE; II -= 1) { Pickup: _this = pptr(II); tt = (int) ((P0 *)_this)->_p; ot = (uchar) ((P0 *)_this)->_t; if (trans[ot][tt]->atom & 8) { t = trans[ot][tt]; if (t->qu[0] != 0) { Ccheck++; if (!q_cond(II, t)) continue; Cholds++; } From = To = II; trpt->tau |= 32; /* preselect marker */ #ifdef DEBUG printf("%3ld: proc %d PreSelected (tau=%d)\n", depth, II, trpt->tau); #endif goto MainLoop; } } trpt->tau &= ~32; #endif Repeat: if (trpt->tau&8) /* atomic */ { From = To = (short ) trpt->pr; nlinks++; } else { From = now._nr_pr-1; To = 
BASE; } MainLoop: _n = _m = 0; for (II = From; II >= To; II -= 1) { _this = pptr(II); tt = (int) ((P0 *)_this)->_p; ot = (uchar) ((P0 *)_this)->_t; #if SYNC /* no rendezvous with same proc */ if (boq != -1 && trpt->pr == II) { continue; } #endif ntrpt->pr = (uchar) II; ntrpt->st = tt; trpt->o_pm &= ~1; /* no move yet */ #ifdef EVENT_TRACE trpt->o_event = now._event; #endif #ifdef HAS_PRIORITY if (!highest_priority(((P0 *)_this)->_pid, II, t)) { continue; } #else #ifdef HAS_PROVIDED if (!provided(II, ot, tt, t)) { continue; } #endif #endif #ifdef HAS_UNLESS E_state = 0; #endif for (t = trans[ot][tt]; t; t = t->nxt) { #ifdef HAS_UNLESS if (E_state > 0 && E_state != t->e_trans) break; #endif ntrpt->o_t = t; oboq = boq; if (!(_m = do_transit(t, II))) continue; trpt->o_pm |= 1; /* we moved */ (trpt+1)->o_m = _m; /* for unsend */ #ifdef PEG peg[t->forw]++; #endif #ifdef CHECK printf("%3ld: proc %d exec %d, ", depth, II, t->forw); printf("%d to %d, %s %s %s", tt, t->st, t->tp, (t->atom&2)?"atomic":"", (boq != -1)?"rendez-vous":""); #ifdef HAS_UNLESS if (t->e_trans) printf(" (escapes to state %d)", t->st); #endif printf(" %saccepting [tau=%d]\n", (trpt->o_pm&2)?"":"non-", trpt->tau); #endif #ifdef HAS_UNLESS E_state = t->e_trans; #if SYNC>0 if (t->e_trans > 0 && (boq != -1 /* || oboq != -1 */)) { fprintf(efd, "error: a rendezvous stmnt in the escape clause\n"); fprintf(efd, " of an unless stmnt is not compatible with -DBFS\n"); pan_exit(1); } #endif #endif if (t->st > 0) { ((P0 *)_this)->_p = t->st; } /* ptr to pred: */ ntrpt->ostate = (H_el *) otrpt; ntrpt->st = tt; if (boq == -1 && (t->atom&2)) /* atomic */ ntrpt->tau = 8; /* record for next move */ else ntrpt->tau = 0; store_state(ntrpt, (boq != -1 || (t->atom&2)), oboq); #ifdef EVENT_TRACE now._event = trpt->o_event; #endif /* undo move and continue */ trpt++; /* this is where ovals and ipt are set */ do_reverse(t, II, _m); /* restore now. 
*/ trpt--; #ifdef CHECK enter_critical(GLOBAL_LOCK); /* verbose mode */ #if NCORE>1 printf("cpu%d: ", core_id); #endif printf("%3lu: proc %d ", depth, II); printf("reverses %d, %d to %d,", t->forw, tt, t->st); printf(" %s [abit=%d,adepth=%ld,", t->tp, now._a_t, A_depth); printf("tau=%d,%d]\n", trpt->tau, (trpt-1)->tau); leave_critical(GLOBAL_LOCK); #endif reached[ot][t->st] = 1; reached[ot][tt] = 1; ((P0 *)_this)->_p = tt; _n |= _m; } } #ifndef NOREDUCE /* preselected - no succ definitely outside stack */ if ((trpt->tau&32) && !(trpt->tau&64)) { From = now._nr_pr-1; To = BASE; #ifdef DEBUG cpu_printf("%3ld: proc %d UnSelected (_n=%d, tau=%d)\n", depth, II+1, (int) _n, trpt->tau); #endif _n = 0; trpt->tau &= ~32; if (II >= BASE) { goto Pickup; } goto MainLoop; } trpt->tau &= ~(32|64); #endif if (_n != 0) { continue; } #ifdef DEBUG printf("%3ld: no move [II=%d, tau=%d, boq=%d, _nr_pr=%d]\n", depth, II, trpt->tau, boq, now._nr_pr); #endif if (boq != -1) { failedrv++; x = (Trail *) trpt->ostate; /* pre-rv state */ if (!x) { continue; /* root state */ } if ((x->tau&8) || (x->tau&32)) /* break atomic or preselect at parent */ { x->o_pm |= 8; /* mark failure */ _this = pptr(otrpt->pr); #ifdef VERBOSE printf("\treset state of %d from %d to %d\n", otrpt->pr, ((P0 *)_this)->_p, otrpt->st); #endif ((P0 *)_this)->_p = otrpt->st; unsend(boq); /* retract rv offer */ boq = -1; push_bfs(x, x->o_tt); #ifdef VERBOSE printf("failed rv, repush with %d\n", x->o_pm); #endif } #ifdef VERBOSE else { printf("failed rv, tau at parent: %d\n", x->tau); } #endif } else if (now._nr_pr > 0) { if ((trpt->tau&8)) /* atomic */ { trpt->tau &= ~(1|8); /* 1=timeout, 8=atomic */ #ifdef DEBUG printf("%3ld: atomic step proc %d blocks\n", depth, II+1); #endif goto Repeat; } if (!(trpt->tau&1)) /* didn't try timeout yet */ { trpt->tau |= 1; #ifdef DEBUG printf("%ld: timeout\n", depth); #endif goto MainLoop; } #ifndef VERI if (!noends && !a_cycles && !endstate()) { uerror("invalid end state"); } #endif } } 
} #endif void putter(Trail *trpt, int fd) { long j; if (!trpt) return; if (trpt != (Trail *) trpt->ostate) putter((Trail *) trpt->ostate, fd); if (trpt->o_t) { sprintf(snap, "%d:%d:%d\n", trcnt++, trpt->pr, trpt->o_t->t_id); j = strlen(snap); if (write(fd, snap, j) != j) { printf("pan: error writing %s\n", fnm); pan_exit(1); } } } void n_ewrite(int fd, char *s, int n) { if (write(fd, s, strlen(s)) != strlen(s)) { printf("pan: error writing %s\n", fnm); pan_exit(1); } } void nuerror(void) { int fd = make_trail(); int j; if (fd < 0) return; #ifdef VERI sprintf(snap, "-2:%d:-2\n", (uchar) ((P0 *)pptr(0))->_t); n_ewrite(fd, snap, strlen(snap)); #endif #ifdef MERGED sprintf(snap, "-4:-4:-4\n"); n_ewrite(fd, snap, strlen(snap)); #endif trcnt = 1; putter(trpt, fd); if (ntrpt->o_t) { sprintf(snap, "%d:%d:%d\n", trcnt++, ntrpt->pr, ntrpt->o_t->t_id); j = strlen(snap); n_ewrite(fd, snap, j); } close(fd); if (errors >= upto && upto != 0) { wrapup(); } } #endif #if (NCORE>1 || defined(BFS_PAR)) && !defined(WIN32) && !defined(WIN64) /* Test and Set assembly code */ #if defined(i386) || defined(__i386__) || defined(__x86_64__) int tas(volatile int *s) /* tested */ { int r; __asm__ __volatile__( "xchgl %0, %1 \n\t" : "=r"(r), "=m"(*s) : "0"(1), "m"(*s) : "memory"); return r; } #elif defined(__arm__) int tas(volatile int *s) /* not tested */ { int r = 1; __asm__ __volatile__( "swpb %0, %0, [%3] \n" : "=r"(r), "=m"(*s) : "0"(r), "r"(s)); return r; } #elif defined(sparc) || defined(__sparc__) int tas(volatile int *s) /* not tested */ { int r = 1; __asm__ __volatile__( " ldstub [%2], %0 \n" : "=r"(r), "=m"(*s) : "r"(s)); return r; } #elif defined(ia64) || defined(__ia64__) /* Intel Itanium */ int tas(volatile int *s) /* tested */ { long int r; __asm__ __volatile__( " xchg4 %0=%1,%2 \n" : "=r"(r), "+m"(*s) : "r"(1) : "memory"); return (int) r; } #elif defined(__powerpc64__) int tas(volatile int *s) /* courtesy srirajpaul */ { int r; #if 1 r = __sync_lock_test_and_set(); #else /* xlc 
compiler only */ r = __fetch_and_or(s, 1); __isync(); #endif return r; } #else #error missing definition of test and set operation for this platform #endif #ifndef NO_CAS #define cas(a,b,c) __sync_bool_compare_and_swap(a,b,c) #else int cas(volatile uint32_t *a, uint32_t b, uint32_t c) { static volatile int cas_lock; while (tas(&cas_lock) != 0) { ; } if (*a == b) { *a = c; cas_lock = 0; return 1; } cas_lock = 0; return 0; } #endif #endif #if NCORE>1 #if defined(WIN32) || defined(WIN64) #ifndef _CONSOLE #define _CONSOLE #endif #ifdef WIN64 #undef long #endif #include /* #ifdef WIN64 #define long long long #endif */ #else #include #include #include #endif /* code common to cygwin/linux and win32/win64: */ #ifdef VERBOSE #define VVERBOSE (1) #else #define VVERBOSE (0) #endif /* the following values must be larger than 256 and must fit in an int */ #define QUIT 1024 /* terminate now command */ #define QUERY 512 /* termination status query message */ #define QUERY_F 513 /* query failed, cannot quit */ #define GN_FRAMES (int) (GWQ_SIZE / (double) sizeof(SM_frame)) #define LN_FRAMES (int) (LWQ_SIZE / (double) sizeof(SM_frame)) #ifndef VMAX #define VMAX VECTORSZ #endif #ifndef PMAX #define PMAX 64 #endif #ifndef QMAX #define QMAX 64 #endif #if VECTORSZ>32000 #define OFFT int #else #define OFFT short #endif #ifdef SET_SEG_SIZE /* no longer useful -- being recomputed for local heap size anyway */ double SEG_SIZE = (((double) SET_SEG_SIZE) * 1048576.); #else double SEG_SIZE = (1048576.*1024.); /* 1GB default shared memory pool segments */ #endif double LWQ_SIZE = 0.; /* initialized in main */ #ifdef SET_WQ_SIZE #ifdef NGQ #warning SET_WQ_SIZE applies to global queue -- ignored double GWQ_SIZE = 0.; #else double GWQ_SIZE = (((double) SET_WQ_SIZE) * 1048576.); /* must match the value in pan_proxy.c, if used */ #endif #else #ifdef NGQ double GWQ_SIZE = 0.; #else double GWQ_SIZE = (128.*1048576.); /* 128 MB default queue sizes */ #endif #endif /* Crash Detection Parameters */ 
#ifndef ONESECOND #define ONESECOND (1<<25) #endif #ifndef SHORT_T #define SHORT_T (0.1) #endif #ifndef LONG_T #define LONG_T (600) #endif double OneSecond = (double) (ONESECOND); /* waiting for a free slot -- checks crash */ double TenSeconds = 10. * (ONESECOND); /* waiting for a lock -- check for a crash */ /* Termination Detection Params -- waiting for new state input in Get_Full_Frame */ double Delay = ((double) SHORT_T) * (ONESECOND); /* termination detection trigger */ double OneHour = ((double) LONG_T) * (ONESECOND); /* timeout termination detection */ typedef struct SM_frame SM_frame; typedef struct SM_results SM_results; typedef struct sh_Allocater sh_Allocater; struct SM_frame { /* about 6K per slot */ volatile int m_vsize; /* 0 means free slot */ volatile int m_boq; /* >500 is a control message */ #ifdef FULL_TRAIL volatile struct Stack_Tree *m_stack; /* ptr to previous state */ #endif volatile uchar m_tau; volatile uchar m_o_pm; volatile int nr_handoffs; /* to compute real_depth */ volatile char m_now [VMAX]; #if !defined(NOCOMP) && !defined(HC) volatile char m_mask [(VMAX + 7)/8]; #endif volatile OFFT m_p_offset[PMAX]; volatile OFFT m_q_offset[QMAX]; volatile uchar m_p_skip [PMAX]; volatile uchar m_q_skip [QMAX]; #if defined(C_States) && (HAS_TRACK==1) && (HAS_STACK==1) volatile uchar m_c_stack [StackSize]; #endif }; int proxy_pid; /* id of proxy if nonzero -- receive half */ int store_proxy_pid; short remote_party; int proxy_pid_snd; /* id of proxy if nonzero -- send half */ int iamin[CS_NR+NCORE]; /* non-shared */ #if defined(WIN32) || defined(WIN64) int tas(volatile LONG *); HANDLE proxy_handle_snd; /* for Windows Create and Terminate */ struct sh_Allocater { /* shared memory for states */ volatile char *dc_arena; /* to allocate states from */ volatile long pattern; /* to detect overruns */ volatile long dc_size; /* nr of bytes left */ volatile void *dc_start; /* where memory segment starts */ volatile void *dc_id; /* to attach, detach, remove 
shared memory segments */ volatile sh_Allocater *nxt; /* linked list of pools */ }; DWORD worker_pids[NCORE]; /* root mem of pids of all workers created */ HANDLE worker_handles[NCORE]; /* for windows Create and Terminate */ void * shmid [NR_QS]; /* return value from CreateFileMapping */ void * shmid_M; /* shared mem for state allocation in hashtable */ #ifdef SEP_STATE void *shmid_X; #else void *shmid_S; /* shared bitstate arena or hashtable */ #endif #else int tas(volatile int *); struct sh_Allocater { /* shared memory for states */ volatile char *dc_arena; /* to allocate states from */ volatile long pattern; /* to detect overruns */ volatile long dc_size; /* nr of bytes left */ volatile char *dc_start; /* where memory segment starts */ volatile int dc_id; /* to attach, detach, remove shared memory segments */ volatile sh_Allocater *nxt; /* linked list of pools */ }; int worker_pids[NCORE]; /* root mem of pids of all workers created */ int shmid [NR_QS]; /* return value from shmget */ int nibis = 0; /* set after shared mem has been released */ int shmid_M; /* shared mem for state allocation in hashtable */ #ifdef SEP_STATE long shmid_X; #else int shmid_S; /* shared bitstate arena or hashtable */ volatile sh_Allocater *first_pool; /* of shared state memory */ volatile sh_Allocater *last_pool; #endif #endif struct SM_results { /* for shuttling back final stats */ volatile int m_vsize; /* avoid conflicts with frames */ volatile int m_boq; /* these 2 fields are not written in record_info */ /* probably not all fields really need to be volatile */ volatile double m_memcnt; volatile double m_nstates; volatile double m_truncs; volatile double m_truncs2; volatile double m_nShadow; volatile double m_nlinks; volatile double m_ngrabs; volatile double m_nlost; volatile double m_hcmp; volatile double m_frame_wait; volatile int m_hmax; volatile int m_svmax; volatile int m_smax; volatile int m_mreached; volatile int m_errors; volatile int m_VMAX; volatile short m_PMAX; volatile 
short m_QMAX; volatile uchar m_R; /* reached info for all proctypes */ }; int core_id = 0; /* internal process nr, to know which q to use */ unsigned long nstates_put = 0; /* statistics */ unsigned long nstates_get = 0; int query_in_progress = 0; /* termination detection */ double free_wait = 0.; /* waiting for a free frame */ double frame_wait = 0.; /* waiting for a full frame */ double lock_wait = 0.; /* waiting for access to cs */ double glock_wait[3]; /* waiting for access to global lock */ char *sprefix = "rst"; uchar was_interrupted, issued_kill, writing_trail; static SM_frame cur_Root; /* current root, to be safe with error trails */ SM_frame *m_workq [NR_QS]; /* per cpu work queues + global q */ char *shared_mem[NR_QS]; /* return value from shmat */ #ifdef SEP_HEAP char *my_heap; long my_size; #endif volatile sh_Allocater *dc_shared; /* assigned at initialization */ static int vmax_seen, pmax_seen, qmax_seen; static double gq_tries, gq_hasroom, gq_hasnoroom; volatile int *prfree; volatile int *prfull; volatile int *prcnt; volatile int *prmax; volatile int *sh_lock; /* mutual exclusion locks - in shared memory */ volatile double *is_alive; /* to detect when processes crash */ volatile int *grfree, *grfull, *grcnt, *grmax; /* access to shared global q */ volatile double *gr_readmiss, *gr_writemiss; static int lrfree; /* used for temporary recording of slot */ static int dfs_phase2; void mem_put(int); /* handoff state to other cpu */ void mem_put_acc(void); /* liveness mode */ void mem_get(void); /* get state from work queue */ void sudden_stop(char *); void record_info(SM_results *r) { int i; uchar *ptr; #ifdef SEP_STATE if (0) { cpu_printf("nstates %g nshadow %g -- memory %-6.3f Mb\n", nstates, nShadow, memcnt/(1048576.)); } r->m_memcnt = 0; #else #ifdef BITSTATE r->m_memcnt = 0; /* it's shared */ #endif r->m_memcnt = memcnt; #endif if (a_cycles && core_id == 1) { r->m_nstates = nstates; r->m_nShadow = nstates; } else { r->m_nstates = nstates; r->m_nShadow = 
nShadow; } r->m_truncs = truncs; r->m_truncs2 = truncs2; r->m_nlinks = nlinks; r->m_ngrabs = ngrabs; r->m_nlost = nlost; r->m_hcmp = hcmp; r->m_frame_wait = frame_wait; r->m_hmax = hmax; r->m_svmax = svmax; r->m_smax = smax; r->m_mreached = mreached; r->m_errors = (int) errors; r->m_VMAX = vmax_seen; r->m_PMAX = (short) pmax_seen; r->m_QMAX = (short) qmax_seen; ptr = (uchar *) &(r->m_R); for (i = 0; i <= _NP_; i++) /* all proctypes */ { memcpy(ptr, reached[i], NrStates[i]*sizeof(uchar)); ptr += NrStates[i]*sizeof(uchar); } if (verbose>1) { cpu_printf("Put Results nstates %g (sz %d)\n", nstates, ptr - &(r->m_R)); } } void snapshot(void); void retrieve_info(SM_results *r) { int i, j; volatile uchar *ptr; snapshot(); /* for a final report */ enter_critical(GLOBAL_LOCK); #ifdef SEP_HEAP if (verbose) { printf("cpu%d: local heap-left %ld KB (%d MB)\n", core_id, (long) (my_size/1024), (int) (my_size/1048576)); } #endif if (verbose && core_id == 0) { printf("qmax: "); for (i = 0; i < NCORE; i++) { printf("%d ", prmax[i]); } #ifndef NGQ printf("G: %d", *grmax); #endif printf("\n"); } leave_critical(GLOBAL_LOCK); memcnt += r->m_memcnt; nstates += r->m_nstates; nShadow += r->m_nShadow; truncs += r->m_truncs; truncs2 += r->m_truncs2; nlinks += r->m_nlinks; ngrabs += r->m_ngrabs; nlost += r->m_nlost; hcmp += r->m_hcmp; /* frame_wait += r->m_frame_wait; */ errors += (unsigned long int) r->m_errors; if (hmax < r->m_hmax) hmax = r->m_hmax; if (svmax < r->m_svmax) svmax = r->m_svmax; if (smax < r->m_smax) smax = r->m_smax; if (mreached < r->m_mreached) mreached = r->m_mreached; if (vmax_seen < r->m_VMAX) vmax_seen = r->m_VMAX; if (pmax_seen < (int) r->m_PMAX) pmax_seen = (int) r->m_PMAX; if (qmax_seen < (int) r->m_QMAX) qmax_seen = (int) r->m_QMAX; ptr = &(r->m_R); for (i = 0; i <= _NP_; i++) /* all proctypes */ { for (j = 0; j < NrStates[i]; j++) { if (*(ptr + j) != 0) { reached[i][j] = 1; } } ptr += NrStates[i]*sizeof(uchar); } if (verbose>1) { cpu_printf("Got Results (%d)\n", 
(int) (ptr - &(r->m_R))); snapshot(); } } #if !defined(WIN32) && !defined(WIN64) static void rm_shared_segments(void) { int m; volatile sh_Allocater *nxt_pool; /* * mark all shared memory segments for removal * the actual removes wont happen intil last process dies or detaches * the shmctl calls can return -1 if not all procs have detached yet */ for (m = 0; m < NR_QS; m++) /* +1 for global q */ { if (shmid[m] != -1) { (void) shmctl(shmid[m], IPC_RMID, NULL); } } #ifdef SEP_STATE if (shmid_M != -1) { (void) shmctl(shmid_M, IPC_RMID, NULL); } #else if (shmid_S != -1) { (void) shmctl(shmid_S, IPC_RMID, NULL); } for (last_pool = first_pool; last_pool != NULL; last_pool = nxt_pool) { shmid_M = (int) (last_pool->dc_id); nxt_pool = last_pool->nxt; /* as a pre-caution only */ if (shmid_M != -1) { (void) shmctl(shmid_M, IPC_RMID, NULL); } } #endif } #endif void sudden_stop(char *s) { char b[64]; int i; printf("cpu%d: stop - %s\n", core_id, s); #if !defined(WIN32) && !defined(WIN64) if (proxy_pid != 0) { rm_shared_segments(); } #endif if (search_terminated != NULL) { if (*search_terminated != 0) { if (verbose) { printf("cpu%d: termination initiated (%d)\n", core_id, (int) *search_terminated); } } else { if (verbose) { printf("cpu%d: initiated termination\n", core_id); } *search_terminated |= 8; /* sudden_stop */ } if (core_id == 0) { if (((*search_terminated) & 4) /* uerror in one of the cpus */ && !((*search_terminated) & (8|32|128|256))) /* abnormal stop */ { if (errors == 0) errors++; /* we know there is at least 1 */ } wrapup(); /* incomplete stats, but at least something */ } return; } /* else: should rarely happen, take more drastic measures */ if (core_id == 0) /* local root process */ { for (i = 1; i < NCORE; i++) /* not for 0 of course */ { int ignore; #if defined(WIN32) || defined(WIN64) DWORD dwExitCode = 0; GetExitCodeProcess(worker_handles[i], &dwExitCode); if (dwExitCode == STILL_ACTIVE) { TerminateProcess(worker_handles[i], 0); } printf("cpu0: terminate %d 
%d\n", (int) worker_pids[i], (dwExitCode == STILL_ACTIVE)); #else sprintf(b, "kill -%d %d", (int) SIGKILL, (int) worker_pids[i]); ignore = system(b); /* if this is a proxy: receive half */ printf("cpu0: %s\n", b); #endif } issued_kill++; } else { /* on WIN32/WIN64 -- these merely kills the root process... */ if (was_interrupted == 0) { int ignore; sprintf(b, "kill -%d %d", (int) SIGINT, (int) worker_pids[0]); ignore = system(b); /* warn the root process */ printf("cpu%d: %s\n", core_id, b); issued_kill++; } } } #define iam_alive() is_alive[core_id]++ extern int crash_test(double); extern void crash_reset(void); int someone_crashed(int wait_type) { static double last_value = 0.0; static int count = 0; if (search_terminated == NULL || *search_terminated != 0) { if (!(*search_terminated & (8|32|128|256))) { if (count++ < 100*NCORE) { return 0; } } return 1; } /* check left neighbor only */ if (last_value == is_alive[(core_id + NCORE - 1) % NCORE]) { if (count++ >= 100) /* to avoid unnecessary checks */ { return 1; } return 0; } last_value = is_alive[(core_id + NCORE - 1) % NCORE]; count = 0; crash_reset(); return 0; } void sleep_report(void) { enter_critical(GLOBAL_LOCK); if (verbose) { #ifdef NGQ printf("cpu%d: locks: global %g\tother %g\t", core_id, glock_wait[0], lock_wait - glock_wait[0]); #else printf("cpu%d: locks: GL %g, RQ %g, WQ %g, HT %g\t", core_id, glock_wait[0], glock_wait[1], glock_wait[2], lock_wait - glock_wait[0] - glock_wait[1] - glock_wait[2]); #endif printf("waits: states %g slots %g\n", frame_wait, free_wait); #ifndef NGQ printf("cpu%d: gq [tries %g, room %g, noroom %g]\n", core_id, gq_tries, gq_hasroom, gq_hasnoroom); if (core_id == 0 && (*gr_readmiss >= 1.0 || *gr_readmiss >= 1.0 || *grcnt != 0)) printf("cpu0: gq [readmiss: %g, writemiss: %g cnt %d]\n", *gr_readmiss, *gr_writemiss, *grcnt); #endif } if (free_wait > 1000000.) 
#ifndef NGQ if (!a_cycles) { printf("hint: this search may be faster with a larger work-queue\n"); printf(" (-DSET_WQ_SIZE=N with N>%g), and/or with -DUSE_DISK\n", GWQ_SIZE/sizeof(SM_frame)); printf(" or with a larger value for -zN (N>%ld)\n", z_handoff); #else { printf("hint: this search may be faster if compiled without -DNGQ, with -DUSE_DISK, "); printf("or with a larger -zN (N>%d)\n", z_handoff); #endif } leave_critical(GLOBAL_LOCK); } #ifndef MAX_DSK_FILE #define MAX_DSK_FILE 1000000 /* default is max 1M states per file */ #endif void multi_usage(FILE *fd) { static int warned = 0; if (warned > 0) { return; } else { warned++; } fprintf(fd, "\n"); fprintf(fd, "Defining multi-core mode:\n\n"); fprintf(fd, " -DDUAL_CORE --> same as -DNCORE=2\n"); fprintf(fd, " -DQUAD_CORE --> same as -DNCORE=4\n"); fprintf(fd, " -DNCORE=N --> enables multi_core verification if N>1\n"); fprintf(fd, "\n"); fprintf(fd, "Additional directives supported in multi-core mode:\n\n"); fprintf(fd, " -DSEP_STATE --> forces separate statespaces instead of a single shared state space\n"); fprintf(fd, " -DNUSE_DISK --> use disk for storing states when a work queue overflows\n"); fprintf(fd, " -DMAX_DSK_FILE --> max nr of states per diskfile (%d)\n", MAX_DSK_FILE); fprintf(fd, " -DFULL_TRAIL --> support full error trails (increases memory use)\n"); fprintf(fd, "\n"); fprintf(fd, "More advanced use (should rarely need changing):\n\n"); fprintf(fd, " To change the nr of states that can be stored in the global queue\n"); fprintf(fd, " (lower numbers allow for more states to be stored, prefer multiples of 8):\n"); fprintf(fd, " -DVMAX=N --> upperbound on statevector for handoffs (N=%d)\n", VMAX); fprintf(fd, " -DPMAX=N --> upperbound on nr of procs (default: N=%d)\n", PMAX); fprintf(fd, " -DQMAX=N --> upperbound on nr of channels (default: N=%d)\n", QMAX); fprintf(fd, "\n"); fprintf(fd, " To set the total amount of memory reserved for the global workqueue:\n"); fprintf(fd, " -DSET_WQ_SIZE=N --> 
default: N=128 (defined in MBytes)\n\n"); fprintf(fd, " To force the use of a single global heap, instead of separate heaps:\n"); fprintf(fd, " -DGLOB_HEAP\n"); fprintf(fd, "\n"); fprintf(fd, " To define a fct to initialize data before spawning processes (use quotes):\n"); fprintf(fd, " \"-DC_INIT=fct()\"\n"); fprintf(fd, "\n"); fprintf(fd, " Timer settings for termination and crash detection:\n"); fprintf(fd, " -DSHORT_T=N --> timeout for termination detection trigger (N=%g)\n", (double) SHORT_T); fprintf(fd, " -DLONG_T=N --> timeout for giving up on termination detection (N=%g)\n", (double) LONG_T); fprintf(fd, " -DONESECOND --> (1<<29) --> timeout waiting for a free slot -- to check for crash\n"); fprintf(fd, " -DT_ALERT --> collect stats on crash alert timeouts\n\n"); fprintf(fd, "Help with Linux/Windows/Cygwin configuration for multi-core:\n"); fprintf(fd, " http://spinroot.com/spin/multicore/V5_Readme.html\n"); fprintf(fd, "\n"); } #if NCORE>1 && defined(FULL_TRAIL) typedef struct Stack_Tree { uchar pr; /* process that made transition */ T_ID t_id; /* id of transition */ volatile struct Stack_Tree *prv; /* backward link towards root */ } Stack_Tree; H_el *grab_shared(int); volatile Stack_Tree **stack_last; /* in shared memory */ char *stack_cache = NULL; /* local */ int nr_cached = 0; /* local */ #ifndef CACHE_NR #define CACHE_NR 1024 #endif volatile Stack_Tree * stack_prefetch(void) { volatile Stack_Tree *st; if (nr_cached == 0) { stack_cache = (char *) grab_shared(CACHE_NR * sizeof(Stack_Tree)); nr_cached = CACHE_NR; } st = (volatile Stack_Tree *) stack_cache; stack_cache += sizeof(Stack_Tree); nr_cached--; return st; } void Push_Stack_Tree(short II, T_ID t_id) { volatile Stack_Tree *st; st = (volatile Stack_Tree *) stack_prefetch(); st->pr = II; st->t_id = t_id; st->prv = (Stack_Tree *) stack_last[core_id]; stack_last[core_id] = st; } void Pop_Stack_Tree(void) { volatile Stack_Tree *cf = stack_last[core_id]; if (cf) { stack_last[core_id] = cf->prv; } else 
if (nr_handoffs * z_handoff + depth > 0)
	{	printf("cpu%d: error pop_stack_tree (depth %ld)\n",
			core_id, depth);
	}
}
#endif

/* e_critical: enter critical section 'which' (spin-lock via tas).
 * Re-entrant per core: a second enter on the same lock just bumps the
 * thread-local iamin[] count instead of spinning.  While spinning it
 * keeps the is_alive heartbeat going and, after a ~TenSeconds worth of
 * failed attempts, checks whether a peer crashed and aborts if so.
 */
void
e_critical(int which)
{	double cnt_start;

	/* already held by this core (or replaying a trail): just count the nesting */
	if (readtrail || iamin[which] > 0)
	{	if (!readtrail && verbose)
		{	printf("cpu%d: Double Lock on %d (now %d)\n",
				core_id, which, iamin[which]+1);
			fflush(stdout);
		}
		iamin[which]++;	/* local variable */
		return;
	}
	cnt_start = lock_wait;

	while (sh_lock != NULL)	/* as long as we have shared memory */
	{	int r = tas(&sh_lock[which]);
		if (r == 0)
		{	iamin[which] = 1;
			return;	/* locked */
		}
		lock_wait++;	/* contention statistics */
#ifndef NGQ
		if (which < 3) { glock_wait[which]++; }
#else
		if (which == 0) { glock_wait[which]++; }
#endif
		iam_alive();	/* heartbeat so neighbors don't declare us dead */
		if (lock_wait - cnt_start > TenSeconds)
		{	/* suspiciously long wait -- maybe the lock holder died */
			printf("cpu%d: lock timeout on %d\n", core_id, which);
			cnt_start = lock_wait;
			if (someone_crashed(1))
			{	sudden_stop("lock timeout");
				pan_exit(1);
	}	}	}
}

/* x_critical: leave critical section 'which'.  Unwinds one level of the
 * thread-local nesting count; only the outermost exit actually releases
 * the shared spin-lock (with a memory barrier first on powerpc64).
 */
void
x_critical(int which)
{
	if (iamin[which] != 1)
	{	if (iamin[which] > 1)
		{	iamin[which]--;	/* this is thread-local - no races on this one */
			if (!readtrail && verbose)
			{	printf("cpu%d: Partial Unlock on %d (%d more needed)\n",
					core_id, which, iamin[which]);
				fflush(stdout);
			}
			return;
		} else	/* iamin[which] <= 0 */
		{	/* unlock without matching lock -- report and ignore */
			if (!readtrail)
			{	printf("cpu%d: Invalid Unlock iamin[%d] = %d\n",
					core_id, which, iamin[which]);
				fflush(stdout);
			}
			return;
	}	}

	if (sh_lock != NULL)
	{	iamin[which] = 0;
#if defined(__powerpc64__)
	#if 1
		__sync_synchronize();	/* srirajpaul */
	#else
		__lwsync();	/* xlc compiler only */
	#endif
#endif
		sh_lock[which] = 0;	/* unlock */
	}
}

/* start_proxy: exec pan_proxy with -r (receive) or -s (send) plus the
 * ids it needs; never returns on success.  Remaining args are taken
 * from o_cmdline (continues past this chunk).
 */
void
#if defined(WIN32) || defined(WIN64)
start_proxy(char *s, DWORD r_pid)
#else
start_proxy(char *s, int r_pid)
#endif
{	char Q_arg[16], Z_arg[16], Y_arg[16];
	char *args[32], *ptr;
	int argcnt = 0;

	sprintf(Q_arg, "-Q%d", getpid());
	sprintf(Y_arg, "-Y%d", r_pid);
	sprintf(Z_arg, "-Z%d", proxy_pid /* core_id */);

	args[argcnt++] = "proxy";
	args[argcnt++] = s;	/* -r or -s */
	args[argcnt++] = Q_arg;
	args[argcnt++] = Z_arg;
	args[argcnt++] = Y_arg;
	if
(strlen(o_cmdline) > 0) { ptr = o_cmdline; /* assume args separated by spaces */ do { args[argcnt++] = ptr++; if ((ptr = strchr(ptr, ' ')) != NULL) { while (*ptr == ' ') { *ptr++ = '\0'; } } else { break; } } while (argcnt < 31); } args[argcnt] = NULL; #if defined(WIN32) || defined(WIN64) execvp("pan_proxy", args); /* no return */ #else execvp("./pan_proxy", args); /* no return */ #endif Uerror("pan_proxy exec failed"); } /*** end of common code fragment ***/ #if !defined(WIN32) && !defined(WIN64) void init_shm(void) /* initialize shared work-queues - linux/cygwin */ { key_t key[NR_QS]; int n, m; int must_exit = 0; if (core_id == 0 && verbose) { printf("cpu0: step 3: allocate shared workqueues %g MB\n", ((double) NCORE * LWQ_SIZE + GWQ_SIZE) / (1048576.) ); } for (m = 0; m < NR_QS; m++) /* last q is the global q */ { double qsize = (m == NCORE) ? GWQ_SIZE : LWQ_SIZE; key[m] = ftok(PanSource, m+1); if (key[m] == -1) { perror("ftok shared queues"); must_exit = 1; break; } if (core_id == 0) /* root creates */ { /* check for stale copy */ shmid[m] = shmget(key[m], (size_t) qsize, 0600); if (shmid[m] != -1) /* yes there is one; remove it */ { printf("cpu0: removing stale q%d, status: %d\n", m, shmctl(shmid[m], IPC_RMID, NULL)); } shmid[m] = shmget(key[m], (size_t) qsize, 0600|IPC_CREAT|IPC_EXCL); memcnt += qsize; } else /* workers attach */ { shmid[m] = shmget(key[m], (size_t) qsize, 0600); /* never called, since we create shm *before* we fork */ } if (shmid[m] == -1) { perror("shmget shared queues"); must_exit = 1; break; } shared_mem[m] = (char *) shmat(shmid[m], (void *) 0, 0); /* attach */ if (shared_mem[m] == (char *) -1) { fprintf(stderr, "error: cannot attach shared wq %d (%d Mb)\n", m+1, (int) (qsize/(1048576.))); perror("shmat shared queues"); must_exit = 1; break; } m_workq[m] = (SM_frame *) shared_mem[m]; if (core_id == 0) { int nframes = (m == NCORE) ? 
GN_FRAMES : LN_FRAMES; for (n = 0; n < nframes; n++) { m_workq[m][n].m_vsize = 0; m_workq[m][n].m_boq = 0; } } } if (must_exit) { rm_shared_segments(); fprintf(stderr, "pan: check './pan --' for usage details\n"); pan_exit(1); /* calls cleanup_shm */ } } static uchar * prep_shmid_S(size_t n) /* either sets SS or H_tab, linux/cygwin */ { char *rval; #ifndef SEP_STATE key_t key; if (verbose && core_id == 0) { #ifdef BITSTATE printf("cpu0: step 1: allocate shared bitstate %g Mb\n", (double) n / (1048576.)); #else printf("cpu0: step 1: allocate shared hastable %g Mb\n", (double) n / (1048576.)); #endif } #ifdef MEMLIM if (memcnt + (double) n > memlim) { printf("cpu0: S %8g + %d Kb exceeds memory limit of %8g Mb\n", memcnt/1024., (int) (n/1024), memlim/(1048576.)); printf("cpu0: insufficient memory -- aborting\n"); exit(1); } #endif key = ftok(PanSource, NCORE+2); /* different from queues */ if (key == -1) { perror("ftok shared bitstate or hashtable"); fprintf(stderr, "pan: check './pan --' for usage details\n"); pan_exit(1); } if (core_id == 0) /* root */ { shmid_S = shmget(key, n, 0600); if (shmid_S != -1) { printf("cpu0: removing stale segment, status: %d\n", (int) shmctl(shmid_S, IPC_RMID, NULL)); } shmid_S = shmget(key, n, 0600 | IPC_CREAT | IPC_EXCL); memcnt += (double) n; } else /* worker */ { shmid_S = shmget(key, n, 0600); } if (shmid_S == -1) { perror("shmget shared bitstate or hashtable too large?"); fprintf(stderr, "pan: check './pan --' for usage details\n"); pan_exit(1); } rval = (char *) shmat(shmid_S, (void *) 0, 0); /* attach */ if ((char *) rval == (char *) -1) { perror("shmat shared bitstate or hashtable"); fprintf(stderr, "pan: check './pan --' for usage details\n"); pan_exit(1); } #else rval = (char *) emalloc(n); #endif return (uchar *) rval; } #define TRY_AGAIN 1 #define NOT_AGAIN 0 static char shm_prep_result; static uchar * prep_state_mem(size_t n) /* sets memory arena for states linux/cygwin */ { char *rval; key_t key; static int cnt = 3; /* 
start larger than earlier ftok calls */ shm_prep_result = NOT_AGAIN; /* default */ if (verbose && core_id == 0) { printf("cpu0: step 2+: pre-allocate memory arena %d of %6.2g Mb\n", cnt-3, (double) n / (1048576.)); } #ifdef MEMLIM if (memcnt + (double) n > memlim) { printf("cpu0: error: M %.0f + %.0f Kb exceeds memory limit of %.0f Mb\n", memcnt/1024.0, (double) n/1024.0, memlim/(1048576.)); return NULL; } #endif key = ftok(PanSource, NCORE+cnt); cnt++; if (key == -1) { perror("ftok T"); printf("pan: check './pan --' for usage details\n"); pan_exit(1); } if (core_id == 0) { shmid_M = shmget(key, n, 0600); if (shmid_M != -1) { printf("cpu0: removing stale memory segment %d, status: %d\n", cnt-3, shmctl(shmid_M, IPC_RMID, NULL)); } shmid_M = shmget(key, n, 0600 | IPC_CREAT | IPC_EXCL); /* memcnt += (double) n; -- only amount actually used is counted */ } else { shmid_M = shmget(key, n, 0600); } if (shmid_M == -1) { if (verbose) { printf("error: failed to get pool of shared memory %d of %.0f Mb\n", cnt-3, ((double)n)/(1048576.)); perror("state mem"); printf("pan: check './pan --' for usage details\n"); } shm_prep_result = TRY_AGAIN; return NULL; } rval = (char *) shmat(shmid_M, (void *) 0, 0); /* attach */ if ((char *) rval == (char *) -1) { printf("cpu%d error: failed to attach pool of shared memory %d of %.0f Mb\n", core_id, cnt-3, ((double)n)/(1048576.)); perror("state mem"); return NULL; } return (uchar *) rval; } void init_HT(unsigned long n) /* cygwin/linux version */ { volatile char *x; double get_mem; #ifndef SEP_STATE volatile char *dc_mem_start; double need_mem, got_mem = 0.; #endif #ifdef SEP_STATE #ifndef MEMLIM if (verbose) { printf("cpu0: steps 0,1: no -DMEMLIM set\n"); } #else if (verbose) { printf("cpu0: steps 0,1: -DMEMLIM=%d Mb - (hashtable %g Mb + workqueues %g Mb)\n", MEMLIM, ((double)n/(1048576.)), (((double) NCORE * LWQ_SIZE) + GWQ_SIZE) /(1048576.) 
); } #endif get_mem = NCORE * sizeof(double) + (1 + CS_NR) * sizeof(void *) + 4*sizeof(void *) + 2*sizeof(double); /* NCORE * is_alive + search_terminated + CS_NR * sh_lock + 6 gr vars */ get_mem += 4 * NCORE * sizeof(void *); /* prfree, prfull, prcnt, prmax */ #ifdef FULL_TRAIL get_mem += (NCORE) * sizeof(Stack_Tree *); /* NCORE * stack_last */ #endif x = (volatile char *) prep_state_mem((size_t) get_mem); /* work queues and basic structs */ shmid_X = (long) x; if (x == NULL) { printf("cpu0: could not allocate shared memory, see ./pan --\n"); exit(1); } search_terminated = (volatile unsigned int *) x; /* comes first */ x += sizeof(void *); /* maintain alignment */ is_alive = (volatile double *) x; x += NCORE * sizeof(double); sh_lock = (volatile int *) x; x += CS_NR * sizeof(void *); grfree = (volatile int *) x; x += sizeof(void *); grfull = (volatile int *) x; x += sizeof(void *); grcnt = (volatile int *) x; x += sizeof(void *); grmax = (volatile int *) x; x += sizeof(void *); prfree = (volatile int *) x; x += NCORE * sizeof(void *); prfull = (volatile int *) x; x += NCORE * sizeof(void *); prcnt = (volatile int *) x; x += NCORE * sizeof(void *); prmax = (volatile int *) x; x += NCORE * sizeof(void *); gr_readmiss = (volatile double *) x; x += sizeof(double); gr_writemiss = (volatile double *) x; x += sizeof(double); #ifdef FULL_TRAIL stack_last = (volatile Stack_Tree **) x; x += NCORE * sizeof(Stack_Tree *); #endif #ifndef BITSTATE H_tab = (H_el **) emalloc(n); #endif #else #ifndef MEMLIM #warning MEMLIM not set #define MEMLIM (2048) #endif if (core_id == 0 && verbose) { printf("cpu0: step 0: -DMEMLIM=%d Mb minus hashtable+workqs (%g + %g Mb) leaves %g Mb\n", MEMLIM, ((double)n/(1048576.)), (NCORE * LWQ_SIZE + GWQ_SIZE)/(1048576.), (memlim - memcnt - (double) n - (NCORE * LWQ_SIZE + GWQ_SIZE))/(1048576.)); } #ifndef BITSTATE H_tab = (H_el **) prep_shmid_S((size_t) n); /* hash_table */ #endif need_mem = memlim - memcnt - ((double) NCORE * LWQ_SIZE) - GWQ_SIZE; if 
(need_mem <= 0.) { Uerror("internal error -- shared state memory"); } if (core_id == 0 && verbose) { printf("cpu0: step 2: pre-allocate shared state memory %g Mb\n", need_mem/(1048576.)); } #ifdef SEP_HEAP SEG_SIZE = need_mem / NCORE; if (verbose && core_id == 0) { printf("cpu0: setting segsize to %6g MB\n", SEG_SIZE/(1048576.)); } #if defined(CYGWIN) || defined(__CYGWIN__) if (SEG_SIZE > 512.*1024.*1024.) { printf("warning: reducing SEG_SIZE of %g MB to 512MB (exceeds max for Cygwin)\n", SEG_SIZE/(1024.*1024.)); SEG_SIZE = 512.*1024.*1024.; } #endif #endif mem_reserved = need_mem; while (need_mem > 1024.) { get_mem = need_mem; shm_more: if (get_mem > (double) SEG_SIZE) { get_mem = (double) SEG_SIZE; } if (get_mem <= 0.0) break; /* for allocating states: */ x = dc_mem_start = (volatile char *) prep_state_mem((size_t) get_mem); if (x == NULL) { if (shm_prep_result == NOT_AGAIN || first_pool != NULL || SEG_SIZE < (16. * 1048576.)) { break; } SEG_SIZE /= 2.; if (verbose) { printf("pan: lowered segsize to %f\n", SEG_SIZE); } if (SEG_SIZE >= 1024.) 
{ goto shm_more; } break; } need_mem -= get_mem; got_mem += get_mem; if (first_pool == NULL) { search_terminated = (volatile unsigned int *) x; /* comes first */ x += sizeof(void *); /* maintain alignment */ is_alive = (volatile double *) x; x += NCORE * sizeof(double); sh_lock = (volatile int *) x; x += CS_NR * sizeof(void *); grfree = (volatile int *) x; x += sizeof(void *); grfull = (volatile int *) x; x += sizeof(void *); grcnt = (volatile int *) x; x += sizeof(void *); grmax = (volatile int *) x; x += sizeof(void *); prfree = (volatile int *) x; x += NCORE * sizeof(void *); prfull = (volatile int *) x; x += NCORE * sizeof(void *); prcnt = (volatile int *) x; x += NCORE * sizeof(void *); prmax = (volatile int *) x; x += NCORE * sizeof(void *); gr_readmiss = (volatile double *) x; x += sizeof(double); gr_writemiss = (volatile double *) x; x += sizeof(double); #ifdef FULL_TRAIL stack_last = (volatile Stack_Tree **) x; x += NCORE * sizeof(Stack_Tree *); #endif if (((long)x)&(sizeof(void *)-1)) /* 64-bit word alignment */ { x += sizeof(void *)-(((long)x)&(sizeof(void *)-1)); } #ifdef COLLAPSE ncomps = (unsigned long *) x; x += (256+2) * sizeof(unsigned long); #endif } dc_shared = (sh_Allocater *) x; /* must be in shared memory */ x += sizeof(sh_Allocater); if (core_id == 0) /* root only */ { dc_shared->dc_id = shmid_M; dc_shared->dc_start = dc_mem_start; dc_shared->dc_arena = x; dc_shared->pattern = 1234567; /* protection */ dc_shared->dc_size = (long) get_mem - (long) (x - dc_mem_start); dc_shared->nxt = (long) 0; if (last_pool == NULL) { first_pool = last_pool = dc_shared; } else { last_pool->nxt = dc_shared; last_pool = dc_shared; } } else if (first_pool == NULL) { first_pool = dc_shared; } } if (need_mem > 1024.) 
{ printf("cpu0: could allocate only %g Mb of shared memory (wanted %g more)\n", got_mem/(1048576.), need_mem/(1048576.)); } if (!first_pool) { printf("cpu0: insufficient memory -- aborting.\n"); exit(1); } /* we are still single-threaded at this point, with core_id 0 */ dc_shared = first_pool; #endif } void cleanup_shm(int val) { volatile sh_Allocater *nxt_pool; unsigned long cnt = 0; int m; if (nibis != 0) { printf("cpu%d: Redundant call to cleanup_shm(%d)\n", core_id, val); return; } else { nibis = 1; } if (search_terminated != NULL) { *search_terminated |= 16; /* cleanup_shm */ } for (m = 0; m < NR_QS; m++) { if (shmdt((void *) shared_mem[m]) > 0) { perror("shmdt detaching from shared queues"); } } #ifdef SEP_STATE if (shmdt((void *) shmid_X) != 0) { perror("shmdt detaching from shared state memory"); } #else #ifdef BITSTATE if (SS > 0 && shmdt((void *) SS) != 0) { if (verbose) { perror("shmdt detaching from shared bitstate arena"); } } #else if (core_id == 0) { /* before detaching: */ for (nxt_pool = dc_shared; nxt_pool != NULL; nxt_pool = nxt_pool->nxt) { cnt += nxt_pool->dc_size; } if (verbose) { printf("cpu0: done, %ld Mb of shared state memory left\n", cnt / (long)(1048576)); } } if (shmdt((void *) H_tab) != 0) { perror("shmdt detaching from shared hashtable"); } for (last_pool = first_pool; last_pool != NULL; last_pool = nxt_pool) { nxt_pool = last_pool->nxt; if (shmdt((void *) last_pool->dc_start) != 0) { perror("shmdt detaching from shared state memory"); } } first_pool = last_pool = NULL; /* precaution */ #endif #endif /* detached from shared memory - so cannot use cpu_printf */ if (verbose) { printf("cpu%d: done -- got %ld states from queue\n", core_id, nstates_get); } } extern void give_up(int); extern void Read_Queue(int); void mem_get(void) { SM_frame *f; int is_parent; #if defined(MA) && !defined(SEP_STATE) #error MA without SEP_STATE is not supported with multi-core #endif #ifdef BFS #error instead of -DNCORE -DBFS use -DBFS_PAR #endif #ifdef SC 
#error SC is not supported with multi-core #endif init_shm(); /* we are single threaded when this starts */ if (core_id == 0 && verbose) { printf("cpu0: step 4: calling fork()\n"); } fflush(stdout); /* if NCORE > 1 the child or the parent should fork N-1 more times * the parent is the only process with core_id == 0 and is_parent > 0 * the workers have is_parent = 0 and core_id = 1..NCORE-1 */ if (core_id == 0) { worker_pids[0] = getpid(); /* for completeness */ while (++core_id < NCORE) /* first worker sees core_id = 1 */ { is_parent = fork(); if (is_parent == -1) { Uerror("fork failed"); } if (is_parent == 0) /* this is a worker process */ { if (proxy_pid == core_id) /* always non-zero */ { start_proxy("-r", 0); /* no return */ } goto adapt; /* root process continues spawning */ } worker_pids[core_id] = is_parent; } /* note that core_id is now NCORE */ if (proxy_pid > 0 && proxy_pid < NCORE) { proxy_pid_snd = fork(); if (proxy_pid_snd == -1) { Uerror("proxy fork failed"); } if (proxy_pid_snd == 0) { start_proxy("-s", worker_pids[proxy_pid]); /* no return */ } } /* else continue */ if (is_parent > 0) { core_id = 0; /* reset core_id for root process */ } } else /* worker */ { static char db0[16]; /* good for up to 10^6 cores */ static char db1[16]; adapt: tprefix = db0; sprefix = db1; sprintf(tprefix, "cpu%d_trail", core_id); sprintf(sprefix, "cpu%d_rst", core_id); memcnt = 0; /* count only additionally allocated memory */ } signal(SIGINT, give_up); if (proxy_pid == 0) /* not in a cluster setup, pan_proxy must attach */ { rm_shared_segments(); /* mark all shared segments for removal on exit */ } if (verbose) { cpu_printf("starting core_id %d -- pid %d\n", core_id, getpid()); } #if defined(SEP_HEAP) && !defined(SEP_STATE) { int i; volatile sh_Allocater *ptr; ptr = first_pool; for (i = 0; i < NCORE && ptr != NULL; i++) { if (i == core_id) { my_heap = (char *) ptr->dc_arena; my_size = (long) ptr->dc_size; if (verbose) cpu_printf("local heap %ld MB\n", 
my_size/(1048576)); break; } ptr = ptr->nxt; /* local */ } if (my_heap == NULL) { printf("cpu%d: no local heap\n", core_id); pan_exit(1); } /* else */ #if defined(CYGWIN) || defined(__CYGWIN__) ptr = first_pool; for (i = 0; i < NCORE && ptr != NULL; i++) { ptr = ptr->nxt; /* local */ } dc_shared = ptr; /* any remainder */ #else dc_shared = NULL; /* used all mem for local heaps */ #endif } #endif if (core_id == 0 && !remote_party) { new_state(); /* cpu0 explores root */ if (verbose) cpu_printf("done with 1st dfs, nstates %g (put %d states), read q\n", nstates, nstates_put); dfs_phase2 = 1; } Read_Queue(core_id); /* all cores */ if (verbose) { cpu_printf("put %6d states into queue -- got %6d\n", nstates_put, nstates_get); } if (proxy_pid != 0) { rm_shared_segments(); } done = 1; wrapup(); exit(0); } #else int unpack_state(SM_frame *, int); #endif H_el * grab_shared(int n) { #ifndef SEP_STATE char *rval = (char *) 0; if (n == 0) { printf("cpu%d: grab shared zero\n", core_id); fflush(stdout); return (H_el *) rval; } else if (n&(sizeof(void *)-1)) { n += sizeof(void *)-(n&(sizeof(void *)-1)); /* alignment */ } #ifdef SEP_HEAP /* no locking */ if (my_heap != NULL && my_size > n) { rval = my_heap; my_heap += n; my_size -= n; goto done; } #endif if (!dc_shared) { sudden_stop("pan: out of memory"); } /* another lock is always already in effect when this is called */ /* but not always the same lock -- i.e., on different parts of the hashtable */ enter_critical(GLOBAL_LOCK); /* this must be independently mutex */ #if defined(SEP_HEAP) && !defined(WIN32) && !defined(WIN64) { static int noted = 0; if (!noted) { noted = 1; printf("cpu%d: global heap has %ld bytes left, needed %d\n", core_id, dc_shared?dc_shared->dc_size:0, n); } } #endif #if 0 if (dc_shared->pattern != 1234567) { leave_critical(GLOBAL_LOCK); Uerror("overrun -- memory corruption"); } #endif if (dc_shared->dc_size < n) { if (verbose) { printf("Next Pool %g Mb + %d\n", memcnt/(1048576.), n); } if (dc_shared->nxt == 
NULL || dc_shared->nxt->dc_arena == NULL || dc_shared->nxt->dc_size < n) { printf("cpu%d: memcnt %g Mb + wanted %d bytes more\n", core_id, memcnt / (1048576.), n); leave_critical(GLOBAL_LOCK); sudden_stop("out of memory -- aborting"); wrapup(); /* exits */ } else { dc_shared = (sh_Allocater *) dc_shared->nxt; } } rval = (char *) dc_shared->dc_arena; dc_shared->dc_arena += n; dc_shared->dc_size -= (long) n; #if 0 if (VVERBOSE) printf("cpu%d grab shared (%d bytes) -- %ld left\n", core_id, n, dc_shared->dc_size); #endif leave_critical(GLOBAL_LOCK); done: memset(rval, 0, n); memcnt += (double) n; return (H_el *) rval; #else return (H_el *) emalloc(n); #endif } SM_frame * Get_Full_Frame(int n) { SM_frame *f; double cnt_start = frame_wait; f = &m_workq[n][prfull[n]]; while (f->m_vsize == 0) /* await full slot LOCK : full frame */ { iam_alive(); #ifndef NGQ #ifndef SAFETY if (!a_cycles || core_id != 0) #endif if (*grcnt > 0) /* accessed outside lock, but safe even if wrong */ { enter_critical(GQ_RD); /* gq - read access */ if (*grcnt > 0) /* could have changed */ { f = &m_workq[NCORE][*grfull]; /* global q */ if (f->m_vsize == 0) { /* writer is still filling the slot */ *gr_writemiss++; f = &m_workq[n][prfull[n]]; /* reset */ } else { *grfull = (*grfull+1) % (GN_FRAMES); enter_critical(GQ_WR); *grcnt = *grcnt - 1; leave_critical(GQ_WR); leave_critical(GQ_RD); return f; } } leave_critical(GQ_RD); } #endif if (frame_wait++ - cnt_start > Delay) { if (0) { cpu_printf("timeout on q%d -- %u -- query %d\n", n, f, query_in_progress); } return (SM_frame *) 0; /* timeout */ } } iam_alive(); if (VVERBOSE) cpu_printf("got frame from q%d\n", n); prfull[n] = (prfull[n] + 1) % (LN_FRAMES); enter_critical(QLOCK(n)); prcnt[n]--; /* lock out increments */ leave_critical(QLOCK(n)); return f; } SM_frame * Get_Free_Frame(int n) { SM_frame *f; double cnt_start = free_wait; if (VVERBOSE) { cpu_printf("get free frame from q%d\n", n); } if (n == NCORE) /* global q */ { f = &(m_workq[n][lrfree]); 
} else { f = &(m_workq[n][prfree[n]]); } while (f->m_vsize != 0) /* await free slot LOCK : free slot */ { iam_alive(); if (free_wait++ - cnt_start > OneSecond) { if (verbose) { cpu_printf("timeout waiting for free slot q%d\n", n); } cnt_start = free_wait; if (someone_crashed(1)) { printf("cpu%d: search terminated\n", core_id); sudden_stop("get free frame"); pan_exit(1); } } } if (n != NCORE) { prfree[n] = (prfree[n] + 1) % (LN_FRAMES); enter_critical(QLOCK(n)); prcnt[n]++; /* lock out decrements */ if (prmax[n] < prcnt[n]) { prmax[n] = prcnt[n]; } leave_critical(QLOCK(n)); } return f; } #ifndef NGQ int GlobalQ_HasRoom(void) { int rval = 0; gq_tries++; if (*grcnt < GN_FRAMES) /* there seems to be room */ { enter_critical(GQ_WR); /* gq write access */ if (*grcnt < GN_FRAMES) { if (m_workq[NCORE][*grfree].m_vsize != 0) { /* can happen if reader is slow emptying slot */ *gr_readmiss++; goto out; /* dont wait: release lock and return */ } lrfree = *grfree; /* Get_Free_Frame use lrfree in this mode */ *grfree = (*grfree + 1) % GN_FRAMES; *grcnt = *grcnt + 1; /* count nr of slots filled -- no additional lock needed */ if (*grmax < *grcnt) *grmax = *grcnt; leave_critical(GQ_WR); /* for short lock duration */ gq_hasroom++; mem_put(NCORE); /* copy state into reserved slot */ rval = 1; /* successfull handoff */ } else { gq_hasnoroom++; out: leave_critical(GQ_WR); } } return rval; } #endif int unpack_state(SM_frame *f, int from_q) { int i, j; static H_el D_State; if (f->m_vsize > 0) { boq = f->m_boq; if (boq > 256) { cpu_printf("saw control %d, expected state\n", boq); return 0; } vsize = f->m_vsize; correct: memcpy((uchar *) &now, (uchar *) f->m_now, vsize); #if !defined(NOCOMP) && !defined(HC) for (i = j = 0; i < VMAX; i++, j = (j+1)%8) { Mask[i] = (f->m_mask[i/8] & (1< 0) { memcpy((uchar *) proc_offset, (uchar *) f->m_p_offset, now._nr_pr * sizeof(OFFT)); memcpy((uchar *) proc_skip, (uchar *) f->m_p_skip, now._nr_pr * sizeof(uchar)); } if (now._nr_qs > 0) { memcpy((uchar *) 
q_offset, (uchar *) f->m_q_offset, now._nr_qs * sizeof(OFFT)); memcpy((uchar *) q_skip, (uchar *) f->m_q_skip, now._nr_qs * sizeof(uchar)); } #ifndef NOVSZ if (vsize != now._vsz) { cpu_printf("vsize %d != now._vsz %d (type %d) %d\n", vsize, now._vsz, f->m_boq, f->m_vsize); vsize = now._vsz; goto correct; /* rare event: a race */ } #endif hmax = max(hmax, vsize); if (f != &cur_Root) { memcpy((uchar *) &cur_Root, (uchar *) f, sizeof(SM_frame)); } if (((now._a_t) & 1) == 1) /* i.e., when starting nested DFS */ { A_depth = depthfound = 0; memcpy((uchar *)&A_Root, (uchar *)&now, vsize); } nr_handoffs = f->nr_handoffs; } else { cpu_printf("pan: state empty\n"); } depth = 0; trpt = &trail[1]; trpt->tau = f->m_tau; trpt->o_pm = f->m_o_pm; (trpt-1)->ostate = &D_State; /* stub */ trpt->ostate = &D_State; #ifdef FULL_TRAIL if (upto > 0) { stack_last[core_id] = (Stack_Tree *) f->m_stack; } #if defined(VERBOSE) if (stack_last[core_id]) { cpu_printf("%d: UNPACK -- SET m_stack %u (%d,%d)\n", depth, stack_last[core_id], stack_last[core_id]->pr, stack_last[core_id]->t_id); } #endif #endif if (!trpt->o_t) { static Trans D_Trans; trpt->o_t = &D_Trans; } #ifdef VERI if ((trpt->tau & 4) != 4) { trpt->tau |= 4; /* the claim moves first */ cpu_printf("warning: trpt was not up to date\n"); } #endif for (i = 0; i < (int) now._nr_pr; i++) { P0 *ptr = (P0 *) pptr(i); #ifndef NP if (accpstate[ptr->_t][ptr->_p]) { trpt->o_pm |= 2; } #else if (progstate[ptr->_t][ptr->_p]) { trpt->o_pm |= 4; } #endif } #ifdef EVENT_TRACE #ifndef NP if (accpstate[EVENT_TRACE][now._event]) { trpt->o_pm |= 2; } #else if (progstate[EVENT_TRACE][now._event]) { trpt->o_pm |= 4; } #endif #endif #if defined(C_States) && (HAS_TRACK==1) /* restore state of tracked C objects */ c_revert((uchar *) &(now.c_state[0])); #if (HAS_STACK==1) c_unstack((uchar *) f->m_c_stack); /* unmatched tracked data */ #endif #endif return 1; } void write_root(void) /* for trail file */ { int fd; if (iterative == 0 && Nr_Trails > 1) 
sprintf(fnm, "%s%d.%s", TrailFile, Nr_Trails-1, sprefix); else sprintf(fnm, "%s.%s", TrailFile, sprefix); if (cur_Root.m_vsize == 0) { (void) unlink(fnm); /* remove possible old copy */ return; /* its the default initial state */ } if ((fd = creat(fnm, TMODE)) < 0) { char *q; if ((q = strchr(TrailFile, '.'))) { *q = '\0'; /* strip .pml */ if (iterative == 0 && Nr_Trails-1 > 0) sprintf(fnm, "%s%d.%s", TrailFile, Nr_Trails-1, sprefix); else sprintf(fnm, "%s.%s", TrailFile, sprefix); *q = '.'; fd = creat(fnm, TMODE); } if (fd < 0) { cpu_printf("pan: cannot create %s\n", fnm); perror("cause"); return; } } if (write(fd, &cur_Root, sizeof(SM_frame)) != sizeof(SM_frame)) { cpu_printf("pan: error writing %s\n", fnm); } else { cpu_printf("pan: wrote %s\n", fnm); } close(fd); } void set_root(void) { int fd; char *q; char MyFile[512]; char MySuffix[16]; char *ssuffix = "rst"; int try_core = 1; strcpy(MyFile, TrailFile); try_again: if (whichtrail > 0) { sprintf(fnm, "%s%d.%s", MyFile, whichtrail, ssuffix); fd = open(fnm, O_RDONLY, 0); if (fd < 0 && (q = strchr(MyFile, '.'))) { *q = '\0'; /* strip .pml */ sprintf(fnm, "%s%d.%s", MyFile, whichtrail, ssuffix); *q = '.'; fd = open(fnm, O_RDONLY, 0); } } else { sprintf(fnm, "%s.%s", MyFile, ssuffix); fd = open(fnm, O_RDONLY, 0); if (fd < 0 && (q = strchr(MyFile, '.'))) { *q = '\0'; /* strip .pml */ sprintf(fnm, "%s.%s", MyFile, ssuffix); *q = '.'; fd = open(fnm, O_RDONLY, 0); } } if (fd < 0) { if (try_core < NCORE) { ssuffix = MySuffix; sprintf(ssuffix, "cpu%d_rst", try_core++); goto try_again; } cpu_printf("no file '%s.rst' or '%s' (not an error)\n", MyFile, fnm); } else { if (read(fd, &cur_Root, sizeof(SM_frame)) != sizeof(SM_frame)) { cpu_printf("read error %s\n", fnm); close(fd); pan_exit(1); } close(fd); (void) unpack_state(&cur_Root, -2); #ifdef SEP_STATE cpu_printf("partial trail -- last few steps only\n"); #endif cpu_printf("restored root from '%s'\n", fnm); printf("=====State:=====\n"); { int i, j; P0 *z; for (i = 0; i < 
now._nr_pr; i++) { z = (P0 *)pptr(i); printf("proc %2d (%s) ", i, procname[z->_t]); for (j = 0; src_all[j].src; j++) if (src_all[j].tp == (int) z->_t) { printf(" %s:%d ", PanSource, src_all[j].src[z->_p]); break; } printf("(state %d)\n", z->_p); c_locals(i, z->_t); } c_globals(); } printf("================\n"); } } #ifdef USE_DISK unsigned long dsk_written, dsk_drained; void mem_drain(void); #endif void m_clear_frame(SM_frame *f) { int i, clr_sz = sizeof(SM_results); for (i = 0; i <= _NP_; i++) /* all proctypes */ { clr_sz += NrStates[i]*sizeof(uchar); } memset(f, 0, clr_sz); /* caution if sizeof(SM_results) > sizeof(SM_frame) */ } #define TargetQ_Full(n) (m_workq[n][prfree[n]].m_vsize != 0) #define TargetQ_NotFull(n) (m_workq[n][prfree[n]].m_vsize == 0) int AllQueuesEmpty(void) { int q; #ifndef NGQ if (*grcnt != 0) { return 0; } #endif for (q = 0; q < NCORE; q++) { if (prcnt[q] != 0) { return 0; } } return 1; } void Read_Queue(int q) { SM_frame *f, *of; int remember, target_q; SM_results *r; double patience = 0.0; target_q = (q + 1) % NCORE; for (;;) { f = Get_Full_Frame(q); if (!f) /* 1 second timeout -- and trigger for Query */ { if (someone_crashed(2)) { printf("cpu%d: search terminated [code %d]\n", core_id, search_terminated?*search_terminated:-1); sudden_stop(""); pan_exit(1); } #ifdef TESTING /* to profile with cc -pg and gprof pan.exe -- set handoff depth beyond maxdepth */ exit(0); #endif remember = *grfree; if (core_id == 0 /* root can initiate termination */ && remote_party == 0 /* and only the original root */ && query_in_progress == 0 /* unless its already in progress */ && AllQueuesEmpty()) { f = Get_Free_Frame(target_q); query_in_progress = 1; /* only root process can do this */ if (!f) { Uerror("Fatal1: no free slot"); } f->m_boq = QUERY; /* initiate Query */ if (verbose) { cpu_printf("snd QUERY to q%d (%d) into slot %d\n", target_q, nstates_get + 1, prfree[target_q]-1); } f->m_vsize = remember + 1; /* number will not change unless we receive more 
states */ } else if (patience++ > OneHour) /* one hour watchdog timer */ { cpu_printf("timeout -- giving up\n"); sudden_stop("queue timeout"); pan_exit(1); } if (0) cpu_printf("timed out -- try again\n"); continue; } patience = 0.0; /* reset watchdog */ if (f->m_boq == QUERY) { if (verbose) { cpu_printf("got QUERY on q%d (%d <> %d) from slot %d\n", q, f->m_vsize, nstates_put + 1, prfull[q]-1); snapshot(); } remember = f->m_vsize; f->m_vsize = 0; /* release slot */ if (core_id == 0 && remote_party == 0) /* original root cpu0 */ { if (query_in_progress == 1 /* didn't send more states in the interim */ && *grfree + 1 == remember) /* no action on global queue meanwhile */ { if (verbose) cpu_printf("Termination detected\n"); if (TargetQ_Full(target_q)) { if (verbose) cpu_printf("warning: target q is full\n"); } f = Get_Free_Frame(target_q); if (!f) { Uerror("Fatal2: no free slot"); } m_clear_frame(f); f->m_boq = QUIT; /* send final Quit, collect stats */ f->m_vsize = 111; /* anything non-zero will do */ if (verbose) cpu_printf("put QUIT on q%d\n", target_q); } else { if (verbose) cpu_printf("Stale Query\n"); #ifdef USE_DISK mem_drain(); #endif } query_in_progress = 0; } else { if (TargetQ_Full(target_q)) { if (verbose) cpu_printf("warning: forward query - target q full\n"); } f = Get_Free_Frame(target_q); if (verbose) cpu_printf("snd QUERY response to q%d (%d <> %d) in slot %d\n", target_q, remember, *grfree + 1, prfree[target_q]-1); if (!f) { Uerror("Fatal4: no free slot"); } if (*grfree + 1 == remember) /* no action on global queue */ { f->m_boq = QUERY; /* forward query, to root */ f->m_vsize = remember; } else { f->m_boq = QUERY_F; /* no match -- busy */ f->m_vsize = 112; /* anything non-zero */ #ifdef USE_DISK if (dsk_written != dsk_drained) { mem_drain(); } #endif } } continue; } if (f->m_boq == QUERY_F) { if (verbose) { cpu_printf("got QUERY_F on q%d from slot %d\n", q, prfull[q]-1); } f->m_vsize = 0; /* release slot */ if (core_id == 0 && remote_party == 0) /* 
original root cpu0 */ { if (verbose) cpu_printf("No Match on Query\n"); query_in_progress = 0; } else { if (TargetQ_Full(target_q)) { if (verbose) cpu_printf("warning: forwarding query_f, target queue full\n"); } f = Get_Free_Frame(target_q); if (verbose) cpu_printf("forward QUERY_F to q%d into slot %d\n", target_q, prfree[target_q]-1); if (!f) { Uerror("Fatal5: no free slot"); } f->m_boq = QUERY_F; /* cannot terminate yet */ f->m_vsize = 113; /* anything non-zero */ } #ifdef USE_DISK if (dsk_written != dsk_drained) { mem_drain(); } #endif continue; } if (f->m_boq == QUIT) { if (0) cpu_printf("done -- local memcnt %g Mb\n", memcnt/(1048576.)); retrieve_info((SM_results *) f); /* collect and combine stats */ if (verbose) { cpu_printf("received Quit\n"); snapshot(); } f->m_vsize = 0; /* release incoming slot */ if (core_id != 0) { f = Get_Free_Frame(target_q); /* new outgoing slot */ if (!f) { Uerror("Fatal6: no free slot"); } m_clear_frame(f); /* start with zeroed stats */ record_info((SM_results *) f); f->m_boq = QUIT; /* forward combined results */ f->m_vsize = 114; /* anything non-zero */ if (verbose>1) cpu_printf("fwd Results to q%d\n", target_q); } break; /* successful termination */ } /* else: 0<= boq <= 255, means STATE transfer */ if (unpack_state(f, q) != 0) { nstates_get++; f->m_vsize = 0; /* release slot */ if (VVERBOSE) cpu_printf("Got state\n"); if (search_terminated != NULL && *search_terminated == 0) { new_state(); /* explore successors */ memset((uchar *) &cur_Root, 0, sizeof(SM_frame)); /* avoid confusion */ } else { pan_exit(0); } } else { pan_exit(0); } } if (verbose) cpu_printf("done got %d put %d\n", nstates_get, nstates_put); sleep_report(); } void give_up(int unused_x) { if (search_terminated != NULL) { *search_terminated |= 32; /* give_up */ } if (!writing_trail) { was_interrupted = 1; snapshot(); cpu_printf("Give Up\n"); sleep_report(); pan_exit(1); } else /* we are already terminating */ { cpu_printf("SIGINT\n"); } } void 
check_overkill(void) { vmax_seen = (vmax_seen + 7)/ 8; vmax_seen *= 8; /* round up to a multiple of 8 */ if (core_id == 0 && !remote_party && nstates_put > 0 && VMAX - vmax_seen > 8) { #ifdef BITSTATE printf("cpu0: max VMAX value seen in this run: "); #else printf("cpu0: recommend recompiling with "); #endif printf("-DVMAX=%d\n", vmax_seen); } } void mem_put(int q) /* handoff state to other cpu, workq q */ { SM_frame *f; int i, j; if (vsize > VMAX) { vsize = (vsize + 7)/8; vsize *= 8; /* round up */ printf("pan: recompile with -DVMAX=N with N >= %d\n", (int) vsize); Uerror("aborting"); } if (now._nr_pr > PMAX) { printf("pan: recompile with -DPMAX=N with N >= %d\n", now._nr_pr); Uerror("aborting"); } if (now._nr_qs > QMAX) { printf("pan: recompile with -DQMAX=N with N >= %d\n", now._nr_qs); Uerror("aborting"); } if (vsize > vmax_seen) vmax_seen = vsize; if (now._nr_pr > pmax_seen) pmax_seen = now._nr_pr; if (now._nr_qs > qmax_seen) qmax_seen = now._nr_qs; f = Get_Free_Frame(q); /* not called in likely deadlock states */ if (!f) { Uerror("Fatal3: no free slot"); } if (VVERBOSE) cpu_printf("putting state into q%d\n", q); memcpy((uchar *) f->m_now, (uchar *) &now, vsize); #if !defined(NOCOMP) && !defined(HC) memset((uchar *) f->m_mask, 0, (VMAX+7)/8 * sizeof(char)); for (i = j = 0; i < VMAX; i++, j = (j+1)%8) { if (Mask[i]) { f->m_mask[i/8] |= (1< 0) { memcpy((uchar *) f->m_p_offset, (uchar *) proc_offset, now._nr_pr * sizeof(OFFT)); memcpy((uchar *) f->m_p_skip, (uchar *) proc_skip, now._nr_pr * sizeof(uchar)); } if (now._nr_qs > 0) { memcpy((uchar *) f->m_q_offset, (uchar *) q_offset, now._nr_qs * sizeof(OFFT)); memcpy((uchar *) f->m_q_skip, (uchar *) q_skip, now._nr_qs * sizeof(uchar)); } #if defined(C_States) && (HAS_TRACK==1) && (HAS_STACK==1) c_stack((uchar *) f->m_c_stack); /* save unmatched tracked data */ #endif #ifdef FULL_TRAIL f->m_stack = stack_last[core_id]; #endif f->nr_handoffs = nr_handoffs+1; f->m_tau = trpt->tau; f->m_o_pm = trpt->o_pm; f->m_boq = 
boq; f->m_vsize = vsize; /* must come last - now the other cpu can see it */ if (query_in_progress == 1) query_in_progress = 2; /* make sure we know, if a query makes the rounds */ nstates_put++; } #ifdef USE_DISK int Dsk_W_Nr, Dsk_R_Nr; int dsk_file = -1, dsk_read = -1; unsigned long dsk_written, dsk_drained; char dsk_name[512]; #ifndef BFS_DISK #if defined(WIN32) || defined(WIN64) #define RFLAGS (O_RDONLY|O_BINARY) #define WFLAGS (O_CREAT|O_WRONLY|O_TRUNC|O_BINARY) #else #define RFLAGS (O_RDONLY) #define WFLAGS (O_CREAT|O_WRONLY|O_TRUNC) #endif #endif void dsk_stats(void) { int i; if (dsk_written > 0) { cpu_printf("dsk_written %d states in %d files\ncpu%d: dsk_drained %6d states\n", dsk_written, Dsk_W_Nr, core_id, dsk_drained); close(dsk_read); close(dsk_file); for (i = 0; i < Dsk_W_Nr; i++) { sprintf(dsk_name, "Q%.3d_%.3d.tmp", i, core_id); unlink(dsk_name); } } } void mem_drain(void) { SM_frame *f, g; int q = (core_id + 1) % NCORE; /* target q */ int sz; if (dsk_read < 0 || dsk_written <= dsk_drained) { return; } while (dsk_written > dsk_drained && TargetQ_NotFull(q)) { f = Get_Free_Frame(q); if (!f) { Uerror("Fatal: unhandled condition"); } if ((dsk_drained+1)%MAX_DSK_FILE == 0) /* 100K states max per file */ { (void) close(dsk_read); /* close current read handle */ sprintf(dsk_name, "Q%.3d_%.3d.tmp", Dsk_R_Nr++, core_id); (void) unlink(dsk_name); /* remove current file */ sprintf(dsk_name, "Q%.3d_%.3d.tmp", Dsk_R_Nr, core_id); cpu_printf("reading %s\n", dsk_name); dsk_read = open(dsk_name, RFLAGS); /* open next file */ if (dsk_read < 0) { Uerror("could not open dsk file"); } } if (read(dsk_read, &g, sizeof(SM_frame)) != sizeof(SM_frame)) { Uerror("bad dsk file read"); } sz = g.m_vsize; g.m_vsize = 0; memcpy(f, &g, sizeof(SM_frame)); f->m_vsize = sz; /* last */ dsk_drained++; } } void mem_file(void) { SM_frame f; int i, j, q = (core_id + 1) % NCORE; /* target q */ if (vsize > VMAX) { printf("pan: recompile with -DVMAX=N with N >= %d\n", vsize); 
Uerror("aborting"); } if (now._nr_pr > PMAX) { printf("pan: recompile with -DPMAX=N with N >= %d\n", now._nr_pr); Uerror("aborting"); } if (now._nr_qs > QMAX) { printf("pan: recompile with -DQMAX=N with N >= %d\n", now._nr_qs); Uerror("aborting"); } if (VVERBOSE) cpu_printf("filing state for q%d\n", q); memcpy((uchar *) f.m_now, (uchar *) &now, vsize); #if !defined(NOCOMP) && !defined(HC) memset((uchar *) f.m_mask, 0, (VMAX+7)/8 * sizeof(char)); for (i = j = 0; i < VMAX; i++, j = (j+1)%8) { if (Mask[i]) { f.m_mask[i/8] |= (1< 0) { memcpy((uchar *)f.m_p_offset, (uchar *)proc_offset, now._nr_pr*sizeof(OFFT)); memcpy((uchar *)f.m_p_skip, (uchar *)proc_skip, now._nr_pr*sizeof(uchar)); } if (now._nr_qs > 0) { memcpy((uchar *) f.m_q_offset, (uchar *) q_offset, now._nr_qs*sizeof(OFFT)); memcpy((uchar *) f.m_q_skip, (uchar *) q_skip, now._nr_qs*sizeof(uchar)); } #if defined(C_States) && (HAS_TRACK==1) && (HAS_STACK==1) c_stack((uchar *) f.m_c_stack); /* save unmatched tracked data */ #endif #ifdef FULL_TRAIL f.m_stack = stack_last[core_id]; #endif f.nr_handoffs = nr_handoffs+1; f.m_tau = trpt->tau; f.m_o_pm = trpt->o_pm; f.m_boq = boq; f.m_vsize = vsize; if (query_in_progress == 1) { query_in_progress = 2; } if (dsk_file < 0) { sprintf(dsk_name, "Q%.3d_%.3d.tmp", Dsk_W_Nr, core_id); dsk_file = open(dsk_name, WFLAGS, 0644); dsk_read = open(dsk_name, RFLAGS); if (dsk_file < 0 || dsk_read < 0) { cpu_printf("File: <%s>\n", dsk_name); Uerror("cannot open diskfile"); } Dsk_W_Nr++; /* nr of next file to open */ cpu_printf("created temporary diskfile %s\n", dsk_name); } else if ((dsk_written+1)%MAX_DSK_FILE == 0) { close(dsk_file); /* close write handle */ sprintf(dsk_name, "Q%.3d_%.3d.tmp", Dsk_W_Nr++, core_id); dsk_file = open(dsk_name, WFLAGS, 0644); if (dsk_file < 0) { cpu_printf("File: <%s>\n", dsk_name); Uerror("aborting: cannot open new diskfile"); } cpu_printf("created temporary diskfile %s\n", dsk_name); } if (write(dsk_file, &f, sizeof(SM_frame)) != sizeof(SM_frame)) { 
Uerror("aborting -- disk write failed (disk full?)"); } nstates_put++; dsk_written++; } #endif int mem_hand_off(void) { if (search_terminated == NULL || *search_terminated != 0) /* not a full crash check */ { pan_exit(0); } iam_alive(); /* on every transition of Down */ #ifdef USE_DISK mem_drain(); /* maybe call this also on every Up */ #endif if (depth > z_handoff /* above handoff limit */ #ifndef SAFETY && !a_cycles /* not in liveness mode */ #endif #if SYNC && boq == -1 /* not mid-rv */ #endif #ifdef VERI && (trpt->tau&4) /* claim moves first */ && !((trpt-1)->tau&128) /* not a stutter move */ #endif && !(trpt->tau&8)) /* not an atomic move */ { int q = (core_id + 1) % NCORE; /* circular handoff */ #ifdef GENEROUS if (prcnt[q] < LN_FRAMES) #else if (TargetQ_NotFull(q) && (dfs_phase2 == 0 || prcnt[core_id] > 0)) #endif { mem_put(q); return 1; } { int rval; #ifndef NGQ rval = GlobalQ_HasRoom(); #else rval = 0; #endif #ifdef USE_DISK if (rval == 0) { void mem_file(void); mem_file(); rval = 1; } #endif return rval; } } return 0; /* i.e., no handoff */ } void mem_put_acc(void) /* liveness mode */ { int q = (core_id + 1) % NCORE; if (search_terminated == NULL || *search_terminated != 0) { pan_exit(0); } #ifdef USE_DISK mem_drain(); #endif /* some tortured use of preprocessing: */ #if !defined(NGQ) || defined(USE_DISK) if (TargetQ_Full(q)) { #endif #ifndef NGQ if (GlobalQ_HasRoom()) { return; } #endif #ifdef USE_DISK mem_file(); } else #else #if !defined(NGQ) || defined(USE_DISK) } #endif #endif { mem_put(q); } } #if defined(WIN32) || defined(WIN64) void init_shm(void) /* initialize shared work-queues */ { char key[512]; int n, m; int must_exit = 0; if (core_id == 0 && verbose) { printf("cpu0: step 3: allocate shared work-queues %g Mb\n", ((double) NCORE * LWQ_SIZE + GWQ_SIZE) / (1048576.)); } for (m = 0; m < NR_QS; m++) /* last q is global 1 */ { double qsize = (m == NCORE) ? 
GWQ_SIZE : LWQ_SIZE; sprintf(key, "Global\\pan_%s_%.3d", PanSource, m); if (core_id == 0) { shmid[m] = CreateFileMapping( INVALID_HANDLE_VALUE, /* use paging file */ NULL, /* default security */ PAGE_READWRITE, /* access permissions */ 0, /* high-order 4 bytes */ qsize, /* low-order bytes, size in bytes */ key); /* name */ } else /* worker nodes just open these segments */ { shmid[m] = OpenFileMapping( FILE_MAP_ALL_ACCESS, /* read/write access */ FALSE, /* children do not inherit handle */ key); } if (shmid[m] == NULL) { fprintf(stderr, "cpu%d: could not create or open shared queues\n", core_id); must_exit = 1; break; } /* attach: */ shared_mem[m] = (char *) MapViewOfFile(shmid[m], FILE_MAP_ALL_ACCESS, 0, 0, 0); if (shared_mem[m] == NULL) { fprintf(stderr, "cpu%d: cannot attach shared q%d (%d Mb)\n", core_id, m+1, (int) (qsize/(1048576.))); must_exit = 1; break; } memcnt += qsize; m_workq[m] = (SM_frame *) shared_mem[m]; if (core_id == 0) { int nframes = (m == NCORE) ? GN_FRAMES : LN_FRAMES; for (n = 0; n < nframes; n++) { m_workq[m][n].m_vsize = 0; m_workq[m][n].m_boq = 0; } } } if (must_exit) { fprintf(stderr, "pan: check './pan --' for usage details\n"); pan_exit(1); /* calls cleanup_shm */ } } static uchar * prep_shmid_S(size_t n) /* either sets SS or H_tab, WIN32/WIN64 */ { char *rval; #ifndef SEP_STATE char key[512]; if (verbose && core_id == 0) { #ifdef BITSTATE printf("cpu0: step 1: allocate shared bitstate %g Mb\n", (double) n / (1048576.)); #else printf("cpu0: step 1: allocate shared hastable %g Mb\n", (double) n / (1048576.)); #endif } #ifdef MEMLIM if (memcnt + (double) n > memlim) { printf("cpu%d: S %8g + %d Kb exceeds memory limit of %8g Mb\n", core_id, memcnt/1024., n/1024, memlim/(1048576.)); printf("cpu%d: insufficient memory -- aborting\n", core_id); exit(1); } #endif /* make key different from queues: */ sprintf(key, "Global\\pan_%s_%.3d", PanSource, NCORE+2); /* different from qs */ if (core_id == 0) /* root */ { shmid_S = 
CreateFileMapping(INVALID_HANDLE_VALUE, NULL, #ifdef WIN64 PAGE_READWRITE, (n>>32), (n & 0xffffffff), key); #else PAGE_READWRITE, 0, n, key); #endif memcnt += (double) n; } else /* worker */ { shmid_S = OpenFileMapping(FILE_MAP_ALL_ACCESS, FALSE, key); } if (shmid_S == NULL) { #ifdef BITSTATE fprintf(stderr, "cpu%d: cannot %s shared bitstate", core_id, core_id?"open":"create"); #else fprintf(stderr, "cpu%d: cannot %s shared hashtable", core_id, core_id?"open":"create"); #endif fprintf(stderr, "pan: check './pan --' for usage details\n"); pan_exit(1); } rval = (char *) MapViewOfFile(shmid_S, FILE_MAP_ALL_ACCESS, 0, 0, 0); /* attach */ if ((char *) rval == NULL) { fprintf(stderr, "cpu%d: cannot attach shared bitstate or hashtable\n", core_id); fprintf(stderr, "pan: check './pan --' for usage details\n"); pan_exit(1); } #else rval = (char *) emalloc(n); #endif return (uchar *) rval; } static uchar * prep_state_mem(size_t n) /* WIN32/WIN64 sets memory arena for states */ { char *rval; char key[512]; static int cnt = 3; /* start larger than earlier ftok calls */ if (verbose && core_id == 0) { printf("cpu0: step 2+: pre-allocate memory arena %d of %g Mb\n", cnt-3, (double) n / (1048576.)); } #ifdef MEMLIM if (memcnt + (double) n > memlim) { printf("cpu%d: error: M %.0f + %.0f exceeds memory limit of %.0f Kb\n", core_id, memcnt/1024.0, (double) n/1024.0, memlim/1024.0); return NULL; } #endif sprintf(key, "Global\\pan_%s_%.3d", PanSource, NCORE+cnt); cnt++; if (core_id == 0) { shmid_M = CreateFileMapping(INVALID_HANDLE_VALUE, NULL, #ifdef WIN64 PAGE_READWRITE, (n>>32), (n & 0xffffffff), key); #else PAGE_READWRITE, 0, n, key); #endif } else { shmid_M = OpenFileMapping(FILE_MAP_ALL_ACCESS, FALSE, key); } if (shmid_M == NULL) { printf("cpu%d: failed to get pool of shared memory nr %d of size %d\n", core_id, cnt-3, n); printf("pan: check './pan --' for usage details\n"); return NULL; } rval = (char *) MapViewOfFile(shmid_M, FILE_MAP_ALL_ACCESS, 0, 0, 0); /* attach */ if (rval 
== NULL) { printf("cpu%d: failed to attach pool of shared memory nr %d of size %d\n", core_id, cnt-3, n); return NULL; } return (uchar *) rval; } void init_HT(unsigned long n) /* WIN32/WIN64 version */ { volatile char *x; double get_mem; #ifndef SEP_STATE char *dc_mem_start; #endif if (verbose) printf("cpu%d: initialization for Windows\n", core_id); #ifdef SEP_STATE #ifndef MEMLIM if (verbose) { printf("cpu0: steps 0,1: no -DMEMLIM set\n"); } #else if (verbose) printf("cpu0: steps 0,1: -DMEMLIM=%d Mb - (hashtable %g Mb + workqueues %g Mb)\n", MEMLIM, ((double)n/(1048576.)), ((double) NCORE * LWQ_SIZE + GWQ_SIZE)/(1048576.)); #endif get_mem = NCORE * sizeof(double) + (1 + CS_NR) * sizeof(void *)+ 4*sizeof(void *) + 2*sizeof(double); /* NCORE * is_alive + search_terminated + CS_NR * sh_lock + 6 gr vars */ get_mem += 4 * NCORE * sizeof(void *); #ifdef FULL_TRAIL get_mem += (NCORE) * sizeof(Stack_Tree *); /* NCORE * stack_last */ #endif x = (volatile char *) prep_state_mem((size_t) get_mem); shmid_X = (void *) x; if (x == NULL) { printf("cpu0: could not allocate shared memory, see ./pan --\n"); exit(1); } search_terminated = (volatile unsigned int *) x; /* comes first */ x += sizeof(void *); /* maintain alignment */ is_alive = (volatile double *) x; x += NCORE * sizeof(double); sh_lock = (volatile int *) x; x += CS_NR * sizeof(void *); /* allow 1 word per entry */ grfree = (volatile int *) x; x += sizeof(void *); grfull = (volatile int *) x; x += sizeof(void *); grcnt = (volatile int *) x; x += sizeof(void *); grmax = (volatile int *) x; x += sizeof(void *); prfree = (volatile int *) x; x += NCORE * sizeof(void *); prfull = (volatile int *) x; x += NCORE * sizeof(void *); prcnt = (volatile int *) x; x += NCORE * sizeof(void *); prmax = (volatile int *) x; x += NCORE * sizeof(void *); gr_readmiss = (volatile double *) x; x += sizeof(double); gr_writemiss = (volatile double *) x; x += sizeof(double); #ifdef FULL_TRAIL stack_last = (volatile Stack_Tree **) x; x += NCORE * 
sizeof(Stack_Tree *); #endif #ifndef BITSTATE H_tab = (H_el **) emalloc(n); #endif #else #ifndef MEMLIM #warning MEMLIM not set #define MEMLIM (2048) #endif if (core_id == 0 && verbose) printf("cpu0: step 0: -DMEMLIM=%d Mb - (hashtable %g Mb + workqueues %g Mb) = %g Mb for state storage\n", MEMLIM, ((double)n/(1048576.)), ((double) NCORE * LWQ_SIZE + GWQ_SIZE)/(1048576.), (memlim - memcnt - (double) n - ((double) NCORE * LWQ_SIZE + GWQ_SIZE))/(1048576.)); #ifndef BITSTATE H_tab = (H_el **) prep_shmid_S((size_t) n); /* hash_table */ #endif get_mem = memlim - memcnt - ((double) NCORE) * LWQ_SIZE - GWQ_SIZE; if (get_mem <= 0) { Uerror("internal error -- shared state memory"); } if (core_id == 0 && verbose) { printf("cpu0: step 2: shared state memory %g Mb\n", get_mem/(1048576.)); } x = dc_mem_start = (char *) prep_state_mem((size_t) get_mem); /* for states */ if (x == NULL) { printf("cpu%d: insufficient memory -- aborting\n", core_id); exit(1); } search_terminated = (volatile unsigned int *) x; /* comes first */ x += sizeof(void *); /* maintain alignment */ is_alive = (volatile double *) x; x += NCORE * sizeof(double); sh_lock = (volatile int *) x; x += CS_NR * sizeof(int); grfree = (volatile int *) x; x += sizeof(void *); grfull = (volatile int *) x; x += sizeof(void *); grcnt = (volatile int *) x; x += sizeof(void *); grmax = (volatile int *) x; x += sizeof(void *); prfree = (volatile int *) x; x += NCORE * sizeof(void *); prfull = (volatile int *) x; x += NCORE * sizeof(void *); prcnt = (volatile int *) x; x += NCORE * sizeof(void *); prmax = (volatile int *) x; x += NCORE * sizeof(void *); gr_readmiss = (volatile double *) x; x += sizeof(double); gr_writemiss = (volatile double *) x; x += sizeof(double); #ifdef FULL_TRAIL stack_last = (volatile Stack_Tree **) x; x += NCORE * sizeof(Stack_Tree *); #endif if (((long)x)&(sizeof(void *)-1)) /* word alignment */ { x += sizeof(void *)-(((long)x)&(sizeof(void *)-1)); /* 64-bit align */ } #ifdef COLLAPSE ncomps = 
	(unsigned long *) x;
	x += (256+2) * sizeof(unsigned long);
 #endif
	/* bookkeeping record for the shared allocator lives in the arena itself */
	dc_shared = (sh_Allocater *) x;	/* in shared memory */
	x += sizeof(sh_Allocater);

	if (core_id == 0)	/* root only */
	{	dc_shared->dc_id = shmid_M;
		dc_shared->dc_start = (void *) dc_mem_start;
		dc_shared->dc_arena = x;
		dc_shared->pattern = 1234567;	/* sanity-check marker for attachers */
		dc_shared->dc_size = (long) get_mem - (long) (x - dc_mem_start);
		dc_shared->nxt = NULL;
	}
#endif
}

#if defined(WIN32) || defined(WIN64) || defined(__i386__) || defined(__x86_64__)
extern BOOLEAN InterlockedBitTestAndSet(LONG volatile* Base, LONG Bit);

/*
 * tas: test-and-set primitive used for the shared-memory locks.
 * Returns the previous value of the tested bit (non-zero if the
 * lock was already held).
 */
int
tas(volatile LONG *s)
{	return InterlockedBitTestAndSet(s, 1);
}
#else
	#error missing definition of test and set operation for this platform
#endif

/*
 * cleanup_shm: detach and release all shared-memory segments
 * (queues, hash table, state arena).  Idempotent: a second call is
 * reported and ignored.  val is only echoed in the diagnostic.
 */
void
cleanup_shm(int val)
{	int m;
	static int nibis = 0;	/* guard against double cleanup */

	if (nibis != 0)
	{	printf("cpu%d: Redundant call to cleanup_shm(%d)\n", core_id, val);
		return;
	} else
	{	nibis = 1;
	}
	if (search_terminated != NULL)
	{	*search_terminated |= 16;	/* cleanup_shm */
	}
	/* release the work queues first */
	for (m = 0; m < NR_QS; m++)
	{	if (shmid[m] != NULL)
		{	UnmapViewOfFile((char *) shared_mem[m]);
			CloseHandle(shmid[m]);
	}	}
#ifdef SEP_STATE
	UnmapViewOfFile((void *) shmid_X);
	CloseHandle((void *) shmid_M);
#else
 #ifdef BITSTATE
	if (shmid_S != NULL)
	{	UnmapViewOfFile(SS);
		CloseHandle(shmid_S);
	}
 #else
	if (core_id == 0 && verbose)
	{	printf("cpu0: done, %ld Mb of shared state memory left\n",
			dc_shared->dc_size / (long)(1048576));
	}
	if (shmid_S != NULL)
	{	UnmapViewOfFile(H_tab);
		CloseHandle(shmid_S);
	}
	shmid_M = (void *) (dc_shared->dc_id);
	UnmapViewOfFile((char *) dc_shared->dc_start);
	CloseHandle(shmid_M);
 #endif
#endif
	/* detached from shared memory - so cannot use cpu_printf */
	if (verbose)
	{	printf("cpu%d: done -- got %d states from queue\n",
			core_id, nstates_get);
	}
}

/*
 * mem_get: multicore startup (Windows).  The root process (core_id 0)
 * spawns NCORE-1 worker processes (and, when configured, a pair of
 * proxy processes for remote runs), runs the first DFS, then all
 * processes service the shared work queue until done.
 */
void
mem_get(void)
{	SM_frame *f;
	int is_parent;

#if defined(MA) && !defined(SEP_STATE)
	#error MA requires SEP_STATE in multi-core mode
#endif
#ifdef BFS
	#error instead of -DNCORE -DBFS use -DBFS_PAR
#endif
#ifdef SC
	#error SC is not supported in
multi-core mode
#endif
	init_shm();	/* we are single threaded when this starts */
	signal(SIGINT, give_up);	/* windows control-c interrupt */

	if (core_id == 0 && verbose)
	{	printf("cpu0: step 4: creating additional workers (proxy %d)\n",
			proxy_pid);
	}
#if 0
	if NCORE > 1 the child or the parent should fork N-1 more times
	the parent is the only process with core_id == 0 and is_parent > 0
	the others (workers) have is_parent = 0 and core_id = 1..NCORE-1
#endif
	if (core_id == 0)	/* root starts up the workers */
	{	worker_pids[0] = (DWORD) getpid();	/* for completeness */
		while (++core_id < NCORE)	/* first worker sees core_id = 1 */
		{	char cmdline[64];
			STARTUPINFO si = { sizeof(si) };
			PROCESS_INFORMATION pi;

			if (proxy_pid == core_id)	/* always non-zero */
			{	sprintf(cmdline, "pan_proxy.exe -r %s-Q%d -Z%d",
					o_cmdline, getpid(), core_id);
			} else
			{	sprintf(cmdline, "pan.exe %s-Q%d -Z%d",
					o_cmdline, getpid(), core_id);
			}
			if (verbose) printf("cpu%d: spawn %s\n",
					core_id, cmdline);

			is_parent = CreateProcess(0, cmdline, 0, 0, FALSE, 0, 0, 0, &si, &pi);
			if (is_parent == 0)
			{	Uerror("fork failed");
			}
			worker_pids[core_id] = pi.dwProcessId;
			worker_handles[core_id] = pi.hProcess;
			if (verbose)
			{	cpu_printf("created core %d, pid %d\n",
					core_id, pi.dwProcessId);
			}
			if (proxy_pid == core_id)	/* we just created the receive half */
			{	/* add proxy send, store pid in proxy_pid_snd */
				sprintf(cmdline, "pan_proxy.exe -s %s-Q%d -Z%d -Y%d",
					o_cmdline, getpid(), core_id, worker_pids[proxy_pid]);
				if (verbose) printf("cpu%d: spawn %s\n",
						core_id, cmdline);
				is_parent = CreateProcess(0, cmdline, 0,0, FALSE, 0,0,0, &si, &pi);
				if (is_parent == 0)
				{	Uerror("fork failed");
				}
				proxy_pid_snd = pi.dwProcessId;
				proxy_handle_snd = pi.hProcess;
				if (verbose)
				{	cpu_printf("created core %d, pid %d (send proxy)\n",
						core_id, pi.dwProcessId);
		}	}	}
		core_id = 0;	/* reset core_id for root process */
	} else	/* worker */
	{	static char db0[16];	/* good for up to 10^6 cores */
		static char db1[16];
		tprefix = db0;
		sprefix = db1;
sprintf(tprefix, "cpu%d_trail", core_id); /* avoid conflicts on file access */ sprintf(sprefix, "cpu%d_rst", core_id); memcnt = 0; /* count only additionally allocated memory */ } if (verbose) { cpu_printf("starting core_id %d -- pid %d\n", core_id, getpid()); } if (core_id == 0 && !remote_party) { new_state(); /* root starts the search */ if (verbose) cpu_printf("done with 1st dfs, nstates %g (put %d states), start reading q\n", nstates, nstates_put); dfs_phase2 = 1; } Read_Queue(core_id); /* all cores */ if (verbose) { cpu_printf("put %6d states into queue -- got %6d\n", nstates_put, nstates_get); } done = 1; wrapup(); exit(0); } #endif #ifdef BITSTATE void init_SS(unsigned long n) { SS = (uchar *) prep_shmid_S((size_t) n); init_HT(0L); } #endif #endif clock_t start_time; #if NCORE>1 clock_t crash_stamp; #endif #if !defined(WIN32) && !defined(WIN64) struct tms start_tm; #endif #if SYNC extern int q_zero(int); extern int not_RV(int); #endif void start_timer(void) { #if defined(WIN32) || defined(WIN64) start_time = clock(); #else start_time = times(&start_tm); #endif } double delta_time; void report_time(void) { printf("\npan: elapsed time %.3g seconds\n", delta_time); if (delta_time > 0.01) { printf("pan: rate %9.8g states/second\n", nstates/delta_time); if (verbose) { printf("pan: avg transition delay %.5g usec\n", delta_time/(nstates+truncs)); } } } void stop_timer(int report) { clock_t stop_time; #if !defined(WIN32) && !defined(WIN64) struct tms stop_tm; stop_time = times(&stop_tm); delta_time = ((double) (stop_time - start_time)) / ((double) sysconf(_SC_CLK_TCK)); #else stop_time = clock(); delta_time = ((double) (stop_time - start_time)) / ((double) CLOCKS_PER_SEC); #endif if (readtrail || delta_time < 0.00) return; #if NCORE>1 if (core_id == 0 && nstates > (double) 0) { printf("\ncpu%d: elapsed time %.3g seconds (%g states visited)\n", core_id, delta_time, nstates); if (delta_time > 0.01) { printf("cpu%d: rate %g states/second\n", core_id, 
nstates/delta_time); } { void check_overkill(void); check_overkill(); } } #else if (report) { report_time(); } #endif } #if NCORE>1 #ifdef T_ALERT double t_alerts[17]; void crash_report(void) { int i; printf("crash alert intervals:\n"); for (i = 0; i < 17; i++) { printf("%d\t%g\n", i, t_alerts[i]); } } #endif void crash_reset(void) { /* false alarm */ if (crash_stamp != (clock_t) 0) { #ifdef T_ALERT double delta_time; int i; #if defined(WIN32) || defined(WIN64) delta_time = ((double) (clock() - crash_stamp)) / ((double) CLOCKS_PER_SEC); #else delta_time = ((double) (times(&start_tm) - crash_stamp)) / ((double) sysconf(_SC_CLK_TCK)); #endif for (i = 0; i < 16; i++) { if (delta_time <= (i*30)) { t_alerts[i] = delta_time; break; } } if (i == 16) t_alerts[i] = delta_time; #endif if (verbose) printf("cpu%d: crash alert off\n", core_id); } crash_stamp = (clock_t) 0; } int crash_test(double maxtime) { double delta_time; if (crash_stamp == (clock_t) 0) { /* start timing */ #if defined(WIN32) || defined(WIN64) crash_stamp = clock(); #else crash_stamp = times(&start_tm); #endif if (verbose) { printf("cpu%d: crash detection\n", core_id); } return 0; } #if defined(WIN32) || defined(WIN64) delta_time = ((double) (clock() - crash_stamp)) / ((double) CLOCKS_PER_SEC); #else delta_time = ((double) (times(&start_tm) - crash_stamp)) / ((double) sysconf(_SC_CLK_TCK)); #endif return (delta_time >= maxtime); } #endif #ifdef BFS_PAR int ncores = 0; #endif void do_the_search(void) { int i; depth = mreached = 0; trpt = &trail[0]; #ifdef VERI trpt->tau |= 4; /* the claim moves first */ #endif for (i = 0; i < (int) now._nr_pr; i++) { P0 *ptr = (P0 *) pptr(i); #ifndef NP if (!(trpt->o_pm&2) && accpstate[ptr->_t][ptr->_p]) { trpt->o_pm |= 2; break; } #else if (!(trpt->o_pm&4) && progstate[ptr->_t][ptr->_p]) { trpt->o_pm |= 4; break; } #endif } #ifdef EVENT_TRACE #ifndef NP if (accpstate[EVENT_TRACE][now._event]) { trpt->o_pm |= 2; } #else if (progstate[EVENT_TRACE][now._event]) { trpt->o_pm |= 
4; } #endif #endif #if !defined(NOCOMP) && !defined(HC) Mask[0] = Mask[1] = 1; /* _nr_pr, _nr_qs */ if (!a_cycles) { i = &(now._a_t) - (uchar *) &now; Mask[i] = 1; /* _a_t */ } #ifndef NOFAIR if (!fairness) { int j = 0; i = &(now._cnt[0]) - (uchar *) &now; while (j++ < NFAIR) Mask[i++] = 1; /* _cnt[] */ } #endif #endif #ifndef NOFAIR if (fairness && (a_cycles && (trpt->o_pm&2))) { now._a_t = 2; /* set the A-bit */ now._cnt[0] = now._nr_pr + 1; #ifdef VERBOSE printf("%3ld: fairness Rule 1, cnt=%d, _a_t=%d\n", depth, now._cnt[now._a_t&1], now._a_t); #endif } #endif c_stack_start = (char *) &i; /* meant to be read-only */ #if defined(HAS_CODE) && defined (C_INIT) C_INIT; /* initialization of data that must precede fork() */ c_init_done++; #endif #if defined(C_States) && (HAS_TRACK==1) /* capture initial state of tracked C objects */ c_update((uchar *) &(now.c_state[0])); #endif #ifdef HAS_CODE if (readtrail) getrail(); /* no return */ #endif #ifndef BFS_PAR start_timer(); #endif #ifdef BFS #ifdef BFS_PAR bfs_main(ncores,0); #else bfs(); #endif #else #if defined(C_States) && defined(HAS_STACK) && (HAS_TRACK==1) /* initial state of tracked & unmatched objects */ c_stack((uchar *) &(svtack->c_stack[0])); #endif #if defined(P_RAND) || defined(T_RAND) srand(s_rand+HASH_NR); #endif #if NCORE>1 mem_get(); #else new_state(); /* start 1st DFS */ #endif #endif } #ifdef INLINE_REV uchar do_reverse(Trans *t, short II, uchar M) { uchar _m = M; int tt = (int) ((P0 *)_this)->_p; #include BACKWARD_MOVES R999: return _m; } #endif #ifndef INLINE #ifdef EVENT_TRACE static char _tp = 'n'; static int _qid = 0; #endif uchar do_transit(Trans *t, short II) { uchar _m = 0; int tt = (int) ((P0 *)_this)->_p; #ifdef M_LOSS uchar delta_m = 0; #endif #ifdef EVENT_TRACE short oboq = boq; uchar ot = (uchar) ((P0 *)_this)->_t; if (II == -EVENT_TRACE) boq = -1; #define continue { boq = oboq; return 0; } #else #define continue return 0 #ifdef SEPARATE uchar ot = (uchar) ((P0 *)_this)->_t; #endif #endif 
#include FORWARD_MOVES P999: #ifdef EVENT_TRACE if (II == -EVENT_TRACE) boq = oboq; #endif return _m; #undef continue } #ifdef EVENT_TRACE void require(char tp, int qid) { Trans *t; _tp = tp; _qid = qid; if (now._event != endevent) for (t = trans[EVENT_TRACE][now._event]; t; t = t->nxt) { if (do_transit(t, -EVENT_TRACE)) { now._event = t->st; reached[EVENT_TRACE][t->st] = 1; #ifdef VERBOSE printf(" event_trace move to -> %d\n", t->st); #endif #ifndef BFS #ifndef NP if (accpstate[EVENT_TRACE][now._event]) (trpt+1)->o_pm |= 2; #else if (progstate[EVENT_TRACE][now._event]) (trpt+1)->o_pm |= 4; #endif #endif #ifdef NEGATED_TRACE if (now._event == endevent) { #ifndef BFS depth++; trpt++; #endif uerror("event_trace error (all events matched)"); #ifndef BFS trpt--; depth--; #endif break; } #endif for (t = t->nxt; t; t = t->nxt) { if (do_transit(t, -EVENT_TRACE)) Uerror("non-determinism in event-trace"); } return; } #ifdef VERBOSE else printf(" event_trace miss '%c' -- %d, %d, %d\n", tp, qid, now._event, t->forw); #endif } #ifdef NEGATED_TRACE now._event = endevent; /* only 1st try will count -- fixed 4.2.6 */ #else #ifndef BFS depth++; trpt++; #endif uerror("event_trace error (no matching event)"); #ifndef BFS trpt--; depth--; #endif #endif } #endif int enabled(int iam, int pid) { Trans *t; uchar *othis = _this; int res = 0; int tt; uchar ot; pid += BASE; if (pid == iam) Uerror("used: enabled(pid=thisproc)"); if (pid < 0 || pid >= (int) now._nr_pr) return 0; _this = pptr(pid); TstOnly = 1; tt = (int) ((P0 *)_this)->_p; ot = (uchar) ((P0 *)_this)->_t; for (t = trans[ot][tt]; t; t = t->nxt) if (do_transit(t, (short) pid)) { res = 1; break; } TstOnly = 0; _this = othis; return res; } #endif #ifdef HAS_PRIORITY int highest_priority(int pid, short nII, Trans *t) { int i = pid; uchar *othis = _this; #ifdef VERI if (nII == 0) { return 1; } #endif #ifdef HAS_PROVIDED i = pid+BASE; #endif if (i < 0 || i >= (int) now._nr_pr #ifdef HAS_PROVIDED || !provided(i, (uchar) ((P0 
*)_this)->_t, (int) ((P0 *)_this)->_p, t) #endif ) { return 0; } for (i = BASE; i < now._nr_pr; i++) { _this = pptr(i); if (i != pid+BASE && ((P0 *)_this)->_priority > ((P0 *)pptr(pid+BASE))->_priority #ifdef HAS_PROVIDED && provided(i, (uchar) ((P0 *)_this)->_t, (int) ((P0 *)_this)->_p, 0) #endif && enabled(i+1, i-BASE)) { _this = othis; return 0; } } _this = othis; return 1; } int get_priority(int pid) { pid += BASE; /* 6.2.7 */ if (pid < 0 || pid >= (int) now._nr_pr) return 0; return ((P0 *)pptr(pid))->_priority; } int set_priority(int pid, int pr) { pid += BASE; /* 6.2.7 */ if (pid < 0 || pid >= (int) now._nr_pr) { #ifdef VERBOSE printf("warning: bad pid %d, no such process (set_priority)\n", pid); #endif return 1; } if (pr < 1 || pr > 255) { Uerror("priority is out of range"); } if (!TstOnly) { (trpt+1)->o_priority = (((P0 *)pptr(pid))->_priority & 255) | (pid << 8); ((P0 *)pptr(pid))->_priority = pr; } return 1; } #endif void snap_time(void) { clock_t stop_time; double delta_time; #if !defined(WIN32) && !defined(WIN64) struct tms stop_tm; stop_time = times(&stop_tm); delta_time = ((double) (stop_time - start_time)) / ((double) sysconf(_SC_CLK_TCK)); #else stop_time = clock(); delta_time = ((double) (stop_time - start_time)) / ((double) CLOCKS_PER_SEC); #endif if (delta_time > 0.01) { printf("t= %8.3g ", delta_time); printf("R= %7.0g", nstates/delta_time); } printf("\n"); if (quota > 0.1 && delta_time > quota) { printf("Time limit of %6.3g minutes exceeded\n", quota/60.0); #if NCORE>1 fflush(stdout); leave_critical(GLOBAL_LOCK); sudden_stop("time-limit"); exit(1); #endif wrapup(); } } void snapshot(void) { #ifdef BFS_PAR e_critical(BFS_GLOB); /* bfs_par / snapshot */ printf("cpu%d: ", who_am_i); #endif #if NCORE>1 enter_critical(GLOBAL_LOCK); /* ncore / snapshot */ printf("cpu%d: ", core_id); #endif printf("Depth= %7ld States= %8.3g ", #if NCORE>1 (long) (nr_handoffs * z_handoff) + #endif mreached, nstates); printf("Transitions= %8.3g ", nstates+truncs); 
#ifdef MA printf("Nodes= %7lu ", nr_states); #endif printf("Memory= %9.3f\t", memcnt/1048576.); snap_time(); fflush(stdout); #if NCORE>1 leave_critical(GLOBAL_LOCK); #endif #ifdef BFS_PAR x_critical(BFS_GLOB); #endif } #ifdef SC void stack2disk(void) { if (!stackwrite && (stackwrite = creat(stackfile, TMODE)) < 0) Uerror("cannot create stackfile"); if (write(stackwrite, trail, DDD*sizeof(Trail)) != DDD*sizeof(Trail)) Uerror("stackfile write error -- disk is full?"); memmove(trail, &trail[DDD], (HHH-DDD+2)*sizeof(Trail)); memset(&trail[HHH-DDD+2], 0, (omaxdepth - HHH + DDD - 2)*sizeof(Trail)); CNT1++; } void disk2stack(void) { long have; CNT2++; memmove(&trail[DDD], trail, (HHH-DDD+2)*sizeof(Trail)); if (!stackwrite || lseek(stackwrite, -DDD* (off_t) sizeof(Trail), SEEK_CUR) == -1) Uerror("disk2stack lseek error"); if (!stackread && (stackread = open(stackfile, 0)) < 0) Uerror("cannot open stackfile"); if (lseek(stackread, (CNT1-CNT2)*DDD* (off_t) sizeof(Trail), SEEK_SET) == -1) Uerror("disk2stack lseek error"); have = read(stackread, trail, DDD*sizeof(Trail)); if (have != DDD*sizeof(Trail)) Uerror("stackfile read error"); } #endif uchar * Pptr(int x) { if (x < 0 || x >= MAXPROC #ifdef TRIX || !processes[x]) #else || !proc_offset[x]) #endif return noptr; else return (uchar *) pptr(x); } uchar * Qptr(int x) { if (x < 0 || x >= MAXQ #ifdef TRIX || !channels[x]) #else || !q_offset[x]) #endif return noqptr; else return (uchar *) qptr(x); } #if NCLAIMS>1 void select_claim(int n) { int m, i; if (n < 0 || n >= NCLAIMS) { uerror("non-existing claim"); } else { m = ((Pclaim *)pptr(0))->_n; if (verbose) { printf("%d: Claim %s (%d), from state %d\n", (int) depth, procname[spin_c_typ[n]], n, ((Pclaim *)pptr(0))->c_cur[n]); } else { printf("pan: ltl formula %s\n", procname[spin_c_typ[n]]); } ((Pclaim *)pptr(0))->c_cur[m] = ((Pclaim *)pptr(0))->_p; ((Pclaim *)pptr(0))->_t = spin_c_typ[n]; ((Pclaim *)pptr(0))->_p = ((Pclaim *)pptr(0))->c_cur[n]; ((Pclaim *)pptr(0))->_n = n; for (i 
= 0; src_all[i].src != (short *) 0; i++) { if (src_all[i].tp == spin_c_typ[n]) { src_claim = src_all[i].src; break; } } if (src_all[i].src == (short *) 0) { uerror("cannot happen: src_ln ref"); } } } #else void select_claim(int n) { if (n != 0) uerror("non-existing claim"); } #endif int qs_empty(void); #if !defined(BFS) && (!defined(BITSTATE) || !defined(MA)) #ifdef NSUCC int N_succ[512]; void tally_succ(int cnt) { if (cnt < 512) N_succ[cnt]++; else printf("tally_succ: cnt %d exceeds range\n", cnt); } void dump_succ(void) { int i; double sum = 0.0; double w_avg = 0.0; printf("Successor counts:\n"); for (i = 0; i < 512; i++) { sum += (double) N_succ[i]; } for (i = 0; i < 512; i++) { if (N_succ[i] > 0) { printf("%3d %10d (%.4g %% of total)\n", i, N_succ[i], (100.0 * (double) N_succ[i])/sum); w_avg += (double) i * (double) N_succ[i]; } } if (sum > N_succ[0]) printf("mean %.4g (without 0: %.4g)\n", w_avg / sum, w_avg / (sum - (double) N_succ[0])); } #endif #ifdef P_REVERSE #define FROM_P (BASE) #define UPTO_P (now._nr_pr-1) #define MORE_P (II <= To) #define INI_P (From-1) #define CNT_P (1 + (To - From)) #define NDONE_P (From <= To) #define ALL_P (II = From; II <= To; II++) #else #define FROM_P (now._nr_pr-1) #define UPTO_P (BASE) #define MORE_P (II >= BASE) #define INI_P (From+1) #define CNT_P (1 + (From - To)) #define NDONE_P (From >= To) #define ALL_P (II = From; II >= To; II--) #endif #ifdef PERMUTED #define CONTINUE0 { if (reversing&2) { II = oII; } continue; } #define CONTINUE { if (reversing&2) { p_reorder(seed); II = oII; } continue; } #else #define CONTINUE0 { continue; } #define CONTINUE { continue; } #endif #ifdef PERMUTED uchar _permutation_[256]; void set_reversed(int unused) { int i, n = now._nr_pr; #ifdef VERBOSE printf("%ld: Set_reversed\n", depth); #endif #if defined(VERI) && !defined(NOCLAIM) for (i = 1; i < n; i++) { _permutation_[i] = n-i; } #else for (i = 0; i < n; i++) { _permutation_[i] = n-1-i; } #endif } void set_rotated(int unused) { int i, n = 
now._nr_pr; #ifdef VERBOSE printf("%ld: Set_rotated %d\n", depth, p_rotate); #endif #if defined(VERI) && !defined(NOCLAIM) for (i = 1; i < n; i++) { _permutation_[i] = 1+(i-1+p_rotate)%(n-1); } #else for (i = 0; i < n; i++) { _permutation_[i] = (i+p_rotate)%n; } #endif } void set_randrot(int unused) { if (now._nr_pr > 1) { p_rotate = 1+rand()%(now._nr_pr-1); } else { p_rotate = 0; } set_rotated(0); } void set_permuted(int T) { /* permute nrs 1..n-1, leave 0 in place */ int i, j, k, n = now._nr_pr; char tmp, *in = &(_permutation_[0]); #ifdef VERBOSE printf("%ld: Set_permuted %d\n", depth, T); #endif srand(T); for (i = 0; i < n; i++) { in[i] = i; } if (n > 1) { for (i = 0; i < n; i++) { #if defined(VERI) && !defined(NOCLAIM) j = 1 + rand()%(n-1); k = 1 + rand()%(n-1); #else j = rand()%(n); k = rand()%(n); #endif tmp = in[j]; in[j] = in[k]; in[k] = tmp; } } } #ifdef VERBOSE short get_permuted(int x) { printf("%ld: Get_permuted %d -> %d\n", depth, x, _permutation_[x]); return (short) _permutation_[x]; } #else #define get_permuted(x) (short) _permutation_[x] #endif #endif /* * new_state() is the main DFS search routine in the verifier * it has a lot of code ifdef-ed together to support * different search modes, which makes it quite unreadable. * if you are studying the code, use the C preprocessor * to generate a specific version from the pan.c source, * e.g. 
by saying: * gcc -E -DNOREDUCE -DBITSTATE pan.c > ppan.c * and then study the resulting file, instead of this version */ void new_state(void) { Trans *t; uchar _n, _m, ot; #ifdef T_RAND short ooi, eoi; #endif #ifdef PERMUTED short oII; uint seed; #endif #ifdef M_LOSS uchar delta_m = 0; #endif short II, JJ = 0, kk; int tt; short From = FROM_P, To = UPTO_P; #ifdef BCS trpt->sched_limit = 0; /* at depth=0 only */ #endif Down: #ifdef CHECK cpu_printf("%d: Down - %s %saccepting [pids %d-%d]\n", depth, (trpt->tau&4)?"claim":"program", (trpt->o_pm&2)?"":"non-", From, To); #endif #ifdef P_RAND trpt->p_skip = -1; #endif #ifdef SC if (depth > hiwater) { stack2disk(); maxdepth += DDD; hiwater += DDD; trpt -= DDD; if(verbose) printf("zap %ld: %ld (maxdepth now %ld)\n", CNT1, hiwater, maxdepth); } #endif trpt->tau &= ~(16|32|64); /* make sure these are off */ #if defined(FULLSTACK) && defined(MA) trpt->proviso = 0; #endif #ifdef NSUCC trpt->n_succ = 0; #endif #if NCORE>1 if (mem_hand_off()) { #if SYNC (trpt+1)->o_n = 1; /* not a deadlock: as below */ #endif #ifndef LOOPSTATE (trpt-1)->tau |= 16; /* worstcase guess: as below */ #endif #if NCORE>1 && defined(FULL_TRAIL) if (upto > 0) { Pop_Stack_Tree(); } #endif goto Up; } #endif if (depth >= maxdepth) { if (!warned) { warned = 1; printf("error: max search depth too small\n"); } if (bounded) { uerror("depth limit reached"); } truncs++; #if SYNC (trpt+1)->o_n = 1; /* not a deadlock */ #endif #ifndef LOOPSTATE (trpt-1)->tau |= 16; /* worstcase guess */ #endif #if NCORE>1 && defined(FULL_TRAIL) if (upto > 0) { Pop_Stack_Tree(); } #endif goto Up; } AllOver: #if (defined(FULLSTACK) && !defined(MA)) || NCORE>1 /* if atomic or rv move, carry forward previous state */ trpt->ostate = (trpt-1)->ostate; #endif #ifdef VERI if ((trpt->tau&4) || ((trpt-1)->tau&128)) #endif if (boq == -1) { /* if not mid-rv */ #ifndef SAFETY if ((now._a_t&1) && depth > A_depth) { int delta = S_A + 2; if (!memcmp((char *)&A_Root + delta, (char *)&now + delta, 
vsize - delta)) { #ifndef NOFAIR if (fairness && now._cnt[1] != 1) /* was > 1 */ { #ifdef CHECK printf(" fairness count non-zero\n"); #endif /* treat as new state */ } else #endif { depthfound = A_depth; #ifdef CHECK printf("matches seed\n"); #endif #ifdef NP uerror("non-progress cycle"); #else uerror("acceptance cycle"); #endif #if NCORE>1 && defined(FULL_TRAIL) if (upto > 0) { Pop_Stack_Tree(); } #endif goto Up; } } #ifdef CHECK else { printf("not seed\n"); } #endif } #endif if (!(trpt->tau&8)) /* if no atomic move */ { #if defined(BCS) && defined(NO_LAST) && defined(HAS_LAST) uchar was_last = now._last; now._last = 0; /* value not stored */ #endif #ifdef BITSTATE #ifdef CNTRSTACK #if defined(BCS) && defined(STORE_CTX) { int xj; for (xj = trpt->sched_limit; xj <= sched_max; xj++) { now._ctx = xj; II = b_store((char *)&now, vsize); trpt->j6 = j1_spin; trpt->j7 = j2_spin; JJ = LL[j1_spin] && LL[j2_spin]; if (II != 0) { break; } } now._ctx = 0; /* just in case */ } #else II = b_store((char *)&now, vsize); trpt->j6 = j1_spin; trpt->j7 = j2_spin; JJ = LL[j1_spin] && LL[j2_spin]; #endif #else #ifdef FULLSTACK #if defined(BCS) && defined(STORE_CTX) { int xj; now._ctx = 0; JJ = onstack_now(); for (xj = trpt->sched_limit; xj <= sched_max; xj++) { now._ctx = xj; II = b_store((char *)&now, vsize); if (II != 0) { break; } } now._ctx = 0; } #else JJ = onstack_now(); II = b_store((char *)&now, vsize); #endif #else #if defined(BCS) && defined(STORE_CTX) { int xj; for (xj = trpt->sched_limit; xj <= sched_max; xj++) { now._ctx = xj; II = b_store((char *)&now, vsize); JJ = II; /* worstcase guess for p.o. - order corrected in 5.2.1 */ if (II != 0) { break; } } now._ctx = 0; } #else II = b_store((char *)&now, vsize); JJ = II; /* worstcase guess for p.o. 
- order corrected in 5.2.1 */ #endif #endif #endif #else #ifdef MA II = g_store((char *)&now, vsize, 0); #ifndef FULLSTACK JJ = II; #else JJ = (II == 2)?1:0; #endif #else II = h_store((char *)&now, vsize); /* @hash j1_spin II */ #ifdef FULLSTACK JJ = (II == 2)?1:0; #endif #endif #endif kk = (II == 1 || II == 2); #if defined(BCS) && defined(NO_LAST) && defined(HAS_LAST) now._last = was_last; /* restore value */ #endif #ifndef SAFETY #if !defined(HC) && (NCORE==1 || defined (SEP_STATE)) if (II == 2 && ((trpt->o_pm&2) || ((trpt-1)->o_pm&2))) #ifndef NOFAIR if (a_cycles && !fairness) /* 5.1.6 -- example by Hirofumi Watanabe */ #endif if (depth > A_depth) /* forum example by adl */ { II = 3; /* Schwoon & Esparza 2005, Gastin&Moro 2004 */ #ifdef VERBOSE printf("state match on dfs stack\n"); #endif goto same_case; } #endif #if defined(FULLSTACK) && defined(BITSTATE) if (!JJ && (now._a_t&1) && depth > A_depth) { int oj1 = j1_spin; uchar o_a_t = now._a_t; now._a_t &= ~(1|16|32); if (onstack_now()) { II = 3; #ifdef VERBOSE printf("state match on 1st dfs stack\n"); #endif } now._a_t = o_a_t; j1_spin = oj1; } #endif if (II == 3 && a_cycles && (now._a_t&1)) { #ifndef NOFAIR if (fairness && now._cnt[1] != 1) /* was > 1 */ { #ifdef CHECK printf(" fairness count non-zero\n"); #endif II = 0; } else #endif { #ifndef BITSTATE nShadow--; #endif same_case: if (Lstate) depthfound = Lstate->D; #ifdef NP uerror("non-progress cycle"); #else uerror("acceptance cycle"); #endif #if NCORE>1 && defined(FULL_TRAIL) if (upto > 0) { Pop_Stack_Tree(); } #endif goto Up; } } #endif #ifndef NOREDUCE #ifndef SAFETY #if NCORE>1 && !defined(SEP_STATE) && defined(V_PROVISO) if (II != 0 && (!Lstate || Lstate->cpu_id < core_id)) { (trpt-1)->tau |= 16; } #endif if ((II && JJ) || (II == 3)) { /* marker for liveness proviso */ #ifndef LOOPSTATE (trpt-1)->tau |= 16; #endif truncs2++; } #else #if NCORE>1 && !defined(SEP_STATE) && defined(V_PROVISO) if (!(II != 0 && (!Lstate || Lstate->cpu_id < core_id))) { /* 
treat as stack state */ (trpt-1)->tau |= 16; } else { /* treat as non-stack state */ (trpt-1)->tau |= 64; } #endif if (!II || !JJ) { /* successor outside stack */ (trpt-1)->tau |= 64; } #endif #endif #if defined(BCS) && (defined(NOREDUCE) || !defined(SAFETY)) if (!II || !JJ) { (trpt-1)->tau |= 64; } #endif if (II) { truncs++; #if NCORE>1 && defined(FULL_TRAIL) if (upto > 0) { Pop_Stack_Tree(); if (depth == 0) { return; } } #endif goto Up; } if (!kk) { static long sdone = (long) 0; long ndone; nstates++; #if defined(ZAPH) && defined(BITSTATE) zstates += (double) hfns; #endif ndone = (ulong) (nstates/(freq)); if (ndone != sdone) { snapshot(); sdone = ndone; #if defined(AUTO_RESIZE) && !defined(BITSTATE) && !defined(MA) if (nstates > ((double)(ONE_L<<(ssize+1)))) { void resize_hashtable(void); resize_hashtable(); } #endif #if defined(ZAPH) && defined(BITSTATE) if (zstates > ((double)(ONE_L<<(ssize-2)))) { /* more than half the bits set */ void zap_hashtable(void); zap_hashtable(); zstates = 0; } #endif } #ifdef SVDUMP if (vprefix > 0) #ifdef SHO /* always use the same hashfunction, for consistency across runs */ if (HASH_NR != 0) { int oh = HASH_NR; HASH_NR = 0; d_hash((uchar *) &now, vsize); /* SHO - set K1 */ HASH_NR = oh; } if (write(svfd, (uchar *) &K1, sizeof(ulong)) != sizeof(ulong)) #else if (write(svfd, (uchar *) &now, vprefix) != vprefix) #endif { fprintf(efd, "writing %s.svd failed\n", PanSource); wrapup(); } #endif #if defined(MA) && defined(W_XPT) if ((ulong) nstates%W_XPT == 0) { void w_xpoint(void); w_xpoint(); } #endif } #if defined(FULLSTACK) || defined(CNTRSTACK) onstack_put(); #ifdef DEBUG2 #if defined(FULLSTACK) && !defined(MA) printf("%d: putting %u (%d)\n", depth, trpt->ostate, (trpt->ostate)?trpt->ostate->tagged:0); #else printf("%d: putting\n", depth); #endif #endif #else #if NCORE>1 trpt->ostate = Lstate; #endif #endif } } if (depth > mreached) mreached = depth; #ifdef VERI if (trpt->tau&4) #endif trpt->tau &= ~(1|2); /* timeout and -request 
off */ _n = 0; #if SYNC (trpt+1)->o_n = 0; #endif #ifdef VERI if (now._nr_pr == 0) /* claim terminated */ { uerror("end state in claim reached"); } if (stopstate[((Pclaim *)pptr(0))->_t][((Pclaim *)pptr(0))->_p]) { uerror("end state in claim reached"); } Stutter: if (trpt->tau&4) /* must make a claimmove */ { #ifndef NOFAIR if ((now._a_t&2) /* A-bit set */ && now._cnt[now._a_t&1] == 1) { now._a_t &= ~2; now._cnt[now._a_t&1] = 0; trpt->o_pm |= 16; #ifdef DEBUG printf("%3ld: fairness Rule 3.: _a_t = %d\n", depth, now._a_t); #endif } #endif II = 0; /* never */ goto Veri0; } #endif #ifdef PERMUTED if (reversing&2) { seed = rand(); p_reorder(seed); } #endif #ifndef NOREDUCE /* Look for a process with only safe transitions */ /* (special rules apply in the 2nd dfs) */ if (boq == -1 && From != To #ifdef SAFETY #if NCORE>1 && (depth < z_handoff) #endif ) #else #if NCORE>1 && ((a_cycles) || (!a_cycles && depth < z_handoff)) #endif #ifdef BCS && (sched_max > 0 || depth > BASE) #endif && (!(now._a_t&1) || (a_cycles && #ifndef BITSTATE #ifdef MA #ifdef VERI !((trpt-1)->proviso)) #else !(trpt->proviso)) #endif #else #ifdef VERI (trpt-1)->ostate && !(((char *)&((trpt-1)->ostate->state))[0] & 128)) #else !(((char *)&(trpt->ostate->state))[0] & 128)) #endif #endif #else #ifdef VERI (trpt-1)->ostate && (trpt-1)->ostate->proviso == 0) #else trpt->ostate->proviso == 0) #endif #endif )) #endif /* attempt Partial Order Reduction as preselect moves */ #ifdef BCS if (trpt->sched_limit < sched_max) #endif { for ALL_P { Resume: /* pick up here if preselect fails */ _this = pptr(II); tt = (int) ((P0 *)_this)->_p; ot = (uchar) ((P0 *)_this)->_t; if (trans[ot][tt]->atom & 8) { t = trans[ot][tt]; if (t->qu[0] != 0) { Ccheck++; if (!q_cond(II, t)) { continue; } Cholds++; } From = To = II; /* preselect process */ #ifdef NIBIS t->om = 0; #endif trpt->tau |= 32; /* preselect marker */ #ifdef DEBUG printf("%3ld: proc %d PreSelected (tau=%d)\n", depth, II, trpt->tau); #endif goto Again; } else { 
continue; } } } trpt->tau &= ~32; #endif #if !defined(NOREDUCE) || (defined(ETIM) && !defined(VERI)) Again: #endif trpt->o_pm &= ~(8|16|32|64); /* clear fairness-marks */ #ifndef NOFAIR if (fairness && boq == -1 #ifdef VERI && (!(trpt->tau&4) && !((trpt-1)->tau&128)) #endif && !(trpt->tau&8)) { /* A_bit = 1; Cnt = N in acc states with A_bit 0 */ if (!(now._a_t&2)) { if (a_cycles && (trpt->o_pm&2)) { /* Accepting state */ now._a_t |= 2; now._cnt[now._a_t&1] = now._nr_pr + 1; trpt->o_pm |= 8; #ifdef DEBUG printf("%3ld: fairness Rule 1: cnt=%d, _a_t=%d\n", depth, now._cnt[now._a_t&1], now._a_t); #endif } } else { /* A_bit = 0 when Cnt 0 */ if (now._cnt[now._a_t&1] == 1) { now._a_t &= ~2; now._cnt[now._a_t&1] = 0; trpt->o_pm |= 16; #ifdef DEBUG printf("%3ld: fairness Rule 3: _a_t = %d\n", depth, now._a_t); #endif } } } #endif #ifdef BCS trpt->bcs = trpt->b_pno = 0; /* initial */ if (From != To /* not a PO or atomic move */ && depth > BASE) /* there is a prior move */ { trpt->b_pno = now._last + BASE; trpt->bcs = B_PHASE1; #ifdef VERBOSE printf("%3ld: BCS phase 1 proc %d limit %d\n", depth, trpt->b_pno, trpt->sched_limit); #endif /* allow only process b_pno to move in this phase */ } c_switch: /* jumps here with bcs == B_PHASE2 with or wo B_FORCED added */ #ifdef VERBOSE printf("%3ld: BCS c_switch phase=%d pno=%d [forced %d]\n", depth, trpt->bcs, trpt->b_pno, (trpt->bcs&B_FORCED)?1:0); #endif #endif #ifdef P_RAND trpt->p_left = CNT_P; if (trpt->p_left > 1) { trpt->p_skip = rand() % (trpt->p_left); } else { trpt->p_skip = -1; } r_switch: #ifdef VERBOSE printf("%3ld: P_RAND r_switch p_skip=%d p_left=%d\n", depth, trpt->p_skip, trpt->p_left); #endif #endif for ALL_P { #ifdef PERMUTED if (reversing&2) { oII = II; if (From != To) { II = get_permuted(II); } } #endif #ifdef P_RAND if (trpt->p_skip >= 0) { trpt->p_skip--; /* skip random nr of procs */ #ifdef VERBOSE printf("%3ld: P_RAND skipping %d [new p_skip=%d p_left=%d]\n", depth, II, trpt->p_skip, trpt->p_left); #endif 
CONTINUE0; } if (trpt->p_left == 0) { #ifdef VERBOSE printf("%3ld: P_RAND done at %d\n", depth, II); #endif break; /* done */ } #ifdef VERBOSE printf("%3ld: P_RAND explore %d [p_left=%d]\n", depth, II, trpt->p_left); #endif trpt->p_left--; #endif #if SYNC /* no rendezvous with same proc */ if (boq != -1 && trpt->pr == II) { CONTINUE0; } #endif #ifdef BCS if ((trpt->bcs & B_PHASE1) && trpt->b_pno != II) { #ifdef VERBOSE printf("%3ld: BCS NotPre II=%d bcs=%d pno=%d [forced %d]\n", depth, II, trpt->bcs, trpt->b_pno, (trpt->bcs&B_FORCED)?1:0); #endif CONTINUE0; } #ifdef VERBOSE else if ((trpt->bcs & B_PHASE1) && trpt->b_pno == II) printf("%3ld: BCS IsPre II=%d bcs=%d pno=%d [forced %d]\n", depth, II, trpt->bcs, trpt->b_pno, (trpt->bcs&B_FORCED)?1:0); #endif if (trpt->bcs & B_PHASE2) /* 2nd phase */ { if (trpt->b_pno == II) /* was already done in phase 1 */ { #ifdef VERBOSE printf("%3ld: BCS NoRepeat II=%d bcs=%d pno=%d [forced %d]\n", depth, II, trpt->bcs, trpt->b_pno, (trpt->bcs&B_FORCED)?1:0); #endif CONTINUE0; } if (!(trpt->bcs & B_FORCED) /* unless forced */ && trpt->sched_limit >= sched_max) { #ifdef VERBOSE printf("%3ld: BCS Bound II=%d bcs=%d pno=%d [forced %d]\n", depth, II, trpt->bcs, trpt->b_pno, (trpt->bcs&B_FORCED)?1:0); #endif CONTINUE0; /* enforce bound */ } } #endif #ifdef VERI Veri0: #endif _this = pptr(II); tt = (int) ((P0 *)_this)->_p; ot = (uchar) ((P0 *)_this)->_t; #ifdef NIBIS /* don't repeat a previous preselected expansion */ /* could hit this if reduction proviso was false */ t = trans[ot][tt]; if (!(trpt->tau&4) && !(trpt->tau&1) && !(trpt->tau&32) && (t->atom & 8) && boq == -1 && From != To) { if (t->qu[0] == 0 || q_cond(II, t)) { _m = t->om; if (_m>_n||(_n>3&&_m!=0)) { _n=_m; } CONTINUE0; /* did it before */ } } #endif trpt->o_pm &= ~1; /* no move in this pid yet */ #ifdef EVENT_TRACE (trpt+1)->o_event = now._event; #endif /* Fairness: Cnt++ when Cnt == II */ #ifndef NOFAIR trpt->o_pm &= ~64; /* didn't apply rule 2 */ if (fairness && boq == 
-1 && !(trpt->o_pm&32) && (now._a_t&2) && now._cnt[now._a_t&1] == II+2) { now._cnt[now._a_t&1] -= 1; #ifdef VERI /* claim need not participate */ if (II == 1) now._cnt[now._a_t&1] = 1; #endif #ifdef DEBUG printf("%3ld: proc %d fairness ", depth, II); printf("Rule 2: --cnt to %d (%d)\n", now._cnt[now._a_t&1], now._a_t); #endif trpt->o_pm |= (32|64); } #endif #ifdef HAS_PRIORITY if (!highest_priority(((P0 *)_this)->_pid, II, t)) { CONTINUE0; } #else #ifdef HAS_PROVIDED if (!provided(II, ot, tt, t)) { CONTINUE0; } #endif #endif /* check all trans of proc II - escapes first */ #ifdef HAS_UNLESS trpt->e_state = 0; #endif (trpt+1)->pr = (uchar) II; (trpt+1)->st = tt; #ifdef T_RAND for (ooi = eoi = 0, t = trans[ot][tt]; t; t = t->nxt, ooi++) { if (strcmp(t->tp, "else") == 0 #ifdef HAS_UNLESS || t->e_trans != 0 #endif ) { eoi++; } } if (eoi > 0) { t = trans[ot][tt]; #ifdef VERBOSE printf("randomizer: suppressed, saw else or escape\n"); #endif } else if (ooi > 0) { eoi = rand()%ooi; #ifdef VERBOSE printf("randomizer: skip %d in %d\n", eoi, ooi); #endif for (t = trans[ot][tt]; t; t = t->nxt) if (eoi-- <= 0) break; } domore: for ( ; t && ooi > 0; t = t->nxt, ooi--) #else for (t = trans[ot][tt]; t; t = t->nxt) #endif { #ifdef HAS_UNLESS /* exploring all transitions from * a single escape state suffices */ if (trpt->e_state > 0 && trpt->e_state != t->e_trans) { #ifdef DEBUG printf("skip 2nd escape %d (did %d before)\n", t->e_trans, trpt->e_state); #endif break; } #endif #if defined(TRIX) && !defined(TRIX_ORIG) && !defined(BFS) (trpt+1)->p_bup = now._ids_[II]; #endif (trpt+1)->o_t = t; #ifdef INLINE #include FORWARD_MOVES P999: /* jumps here when move succeeds */ #else if (!(_m = do_transit(t, II))) { continue; } #endif #ifdef BCS if (depth > BASE && II >= BASE && From != To #ifndef BCS_NOFIX /* added 5.2.5: prior move was not po */ && !((trpt-(BASE+1))->tau & 32) #endif && boq == -1 && (trpt->bcs & B_PHASE2) && trpt->b_pno != II /* context switch */ && !(trpt->bcs & B_FORCED)) 
/* unless forced */ { (trpt+1)->sched_limit = 1 + trpt->sched_limit; #ifdef VERBOSE printf("%3ld: up sched count to %d\n", depth, (trpt+1)->sched_limit); #endif } else { (trpt+1)->sched_limit = trpt->sched_limit; #ifdef VERBOSE printf("%3ld: keep sched count at %d\n", depth, (trpt+1)->sched_limit); #endif } #endif if (boq == -1) #ifdef CTL /* for branching-time, can accept reduction only if */ /* the persistent set contains just 1 transition */ { if ((trpt->tau&32) && (trpt->o_pm&1)) trpt->tau |= 16; trpt->o_pm |= 1; /* we moved */ } #else trpt->o_pm |= 1; /* we moved */ #endif #ifdef LOOPSTATE if (loopstate[ot][tt]) { #ifdef VERBOSE printf("exiting from loopstate:\n"); #endif trpt->tau |= 16; cnt_loops++; } #endif #ifdef PEG peg[t->forw]++; #endif #if defined(VERBOSE) || defined(CHECK) #if defined(SVDUMP) cpu_printf("%3ld: proc %d exec %d \n", depth, II, t->t_id); #else cpu_printf("%3ld: proc %d exec %d, %d to %d, %s %s %s %saccepting [tau=%d]\n", depth, II, t->forw, tt, t->st, t->tp, (t->atom&2)?"atomic":"", (boq != -1)?"rendez-vous":"", (trpt->o_pm&2)?"":"non-", trpt->tau); #ifdef HAS_UNLESS if (t->e_trans) cpu_printf("\t(escape to state %d)\n", t->st); #endif #endif #ifdef T_RAND cpu_printf("\t(randomizer %d)\n", ooi); #endif #endif #ifdef HAS_LAST #ifdef VERI if (II != 0) #endif now._last = II - BASE; #endif #ifdef HAS_UNLESS trpt->e_state = t->e_trans; #endif depth++; trpt++; trpt->pr = (uchar) II; trpt->st = tt; trpt->o_pm &= ~(2|4); if (t->st > 0) { ((P0 *)_this)->_p = t->st; /* moved down reached[ot][t->st] = 1; */ } #ifndef SAFETY if (a_cycles) { #if (ACCEPT_LAB>0 && !defined(NP)) || (PROG_LAB>0 && defined(HAS_NP)) int ii; #endif #define P__Q ((P0 *)pptr(ii)) #if ACCEPT_LAB>0 #ifdef NP /* state 1 of np_ claim is accepting */ if (((P0 *)pptr(0))->_p == 1) trpt->o_pm |= 2; #else for (ii = 0; ii < (int) now._nr_pr; ii++) { if (accpstate[P__Q->_t][P__Q->_p]) { trpt->o_pm |= 2; break; } } #endif #endif #if defined(HAS_NP) && PROG_LAB>0 for (ii = 0; ii < (int) 
now._nr_pr; ii++) { if (progstate[P__Q->_t][P__Q->_p]) { trpt->o_pm |= 4; break; } } #endif #undef P__Q } #endif trpt->o_t = t; trpt->o_n = _n; trpt->o_ot = ot; trpt->o_tt = tt; trpt->o_To = To; trpt->o_m = _m; trpt->tau = 0; #ifdef PERMUTED if (reversing&2) { trpt->seed = seed; trpt->oII = oII; } #endif #if defined(T_RAND) && !defined(BFS) trpt->oo_i = ooi; #endif if (boq != -1 || (t->atom&2)) { trpt->tau |= 8; #ifdef VERI /* atomic sequence in claim */ if((trpt-1)->tau&4) trpt->tau |= 4; else trpt->tau &= ~4; } else { if ((trpt-1)->tau&4) trpt->tau &= ~4; else trpt->tau |= 4; } /* if claim allowed timeout, so */ /* does the next program-step: */ if (((trpt-1)->tau&1) && !(trpt->tau&4)) trpt->tau |= 1; #else } else trpt->tau &= ~8; #endif if (boq == -1 && (t->atom&2)) { From = To = II; nlinks++; } else { From = FROM_P; To = UPTO_P; } #if NCORE>1 && defined(FULL_TRAIL) if (upto > 0) { Push_Stack_Tree(II, t->t_id); } #endif #ifdef TRIX if (processes[II]) { processes[II]->modified = 1; /* transition in II */ #ifdef V_TRIX printf("%4d: process %d modified\n", depth, II); } else { printf("%4d: process %d modified but gone (%p)\n", depth, II, trpt); #endif } #endif goto Down; /* pseudo-recursion */ Up: #ifdef TRIX #ifndef TRIX_ORIG #ifndef BFS now._ids_[trpt->pr] = trpt->p_bup; #endif #else if (processes[trpt->pr]) { processes[trpt->pr]->modified = 1; /* reverse move */ #ifdef V_TRIX printf("%4d: unmodify pr %d (%p)\n", depth, trpt->pr, trpt); } else { printf("%4d: unmodify pr %d (gone) (%p)\n", depth, trpt->pr, trpt); #endif } #endif #endif #ifdef CHECK cpu_printf("%d: Up - %s\n", depth, (trpt->tau&4)?"claim":"program"); #endif #if NCORE>1 iam_alive(); #ifdef USE_DISK mem_drain(); #endif #endif #if defined(MA) || NCORE>1 if (depth <= 0) return; /* e.g., if first state is old, after a restart */ #endif #ifdef SC if (CNT1 > CNT2 && depth < hiwater - (HHH-DDD) - 2) { trpt += DDD; disk2stack(); maxdepth -= DDD; hiwater -= DDD; if(verbose) printf("unzap %ld: %ld\n", CNT2, 
hiwater); } #endif #ifndef SAFETY if ((now._a_t&1) && depth <= A_depth) return; /* to checkcycles() */ #endif #ifndef NOFAIR if (trpt->o_pm&128) /* fairness alg */ { now._cnt[now._a_t&1] = trpt->bup.oval; _n = 1; trpt->o_pm &= ~128; depth--; trpt--; #if defined(VERBOSE) || defined(CHECK) printf("%3ld: reversed fairness default move\n", depth); #endif goto Q999; } #endif #ifdef HAS_LAST #ifdef VERI { long d; Trail *trl; now._last = 0; for (d = 1; d < depth; d++) { trl = getframe(depth-d); /* was (trpt-d) */ if (trl->pr != 0) { now._last = trl->pr - BASE; break; } } } #else now._last = (depth<1)?0:(trpt-1)->pr; #endif #endif #ifdef EVENT_TRACE now._event = trpt->o_event; #endif t = trpt->o_t; _n = trpt->o_n; ot = trpt->o_ot; II = trpt->pr; tt = trpt->o_tt; _this = Pptr(II); To = trpt->o_To; _m = trpt->o_m; #ifdef PERMUTED if (reversing&2) { seed = trpt->seed; oII = trpt->oII; } #endif #if defined(T_RAND) && !defined(BFS) ooi = trpt->oo_i; #endif #ifdef INLINE_REV _m = do_reverse(t, II, _m); #else #include BACKWARD_MOVES R999: /* jumps here when done */ #endif #ifdef VERBOSE cpu_printf("%3ld: proc %d reverses %d, %d to %d\n", depth, II, t->forw, tt, t->st); cpu_printf("\t%s [abit=%d,adepth=%ld,tau=%d,%d]\n", t->tp, now._a_t, A_depth, trpt->tau, (trpt-1)->tau); #endif #ifndef NOREDUCE /* pass the proviso tags */ if ((trpt->tau&8) /* rv or atomic */ && (trpt->tau&16)) (trpt-1)->tau |= 16; #ifdef SAFETY if ((trpt->tau&8) /* rv or atomic */ && (trpt->tau&64)) (trpt-1)->tau |= 64; #endif #endif #if defined(BCS) && (defined(NOREDUCE) || !defined(SAFETY)) if ((trpt->tau&8) && (trpt->tau&64)) (trpt-1)->tau |= 64; #endif depth--; trpt--; #ifdef NSUCC trpt->n_succ++; #endif #ifdef NIBIS (trans[ot][tt])->om = _m; /* head of list */ #endif /* i.e., not set if rv fails */ if (_m) { reached[ot][t->st] = 1; reached[ot][tt] = 1; } #ifdef HAS_UNLESS else trpt->e_state = 0; /* undo */ #endif if (_m>_n||(_n>3&&_m!=0)) _n=_m; ((P0 *)_this)->_p = tt; } /* all options */ #ifdef T_RAND if 
(!t && ooi > 0) { t = trans[ot][tt]; #ifdef VERBOSE printf("randomizer: continue for %d more\n", ooi); #endif goto domore; } #ifdef VERBOSE else printf("randomizer: done\n"); #endif #endif #ifndef NOFAIR /* Fairness: undo Rule 2 */ if ((trpt->o_pm&32) && (trpt->o_pm&64)) { if (trpt->o_pm&1) { #ifdef VERI if (now._cnt[now._a_t&1] == 1) now._cnt[now._a_t&1] = 2; #endif now._cnt[now._a_t&1] += 1; #ifdef VERBOSE printf("%3ld: proc %d fairness ", depth, II); printf("undo Rule 2, cnt=%d, _a_t=%d\n", now._cnt[now._a_t&1], now._a_t); #endif trpt->o_pm &= ~(32|64); } else { if (_n > 0) { trpt->o_pm &= ~64; II = INI_P; } } } #endif #ifdef VERI if (II == 0) { break; /* never claim */ } #endif CONTINUE; } /* ALL_P */ #ifdef NSUCC tally_succ(trpt->n_succ); #endif #ifdef P_RAND if (trpt->p_left > 0 && NDONE_P) { trpt->p_skip = -1; /* probably rendundant */ #ifdef VERBOSE printf("%3ld: P_RAND -- explore remainder\n", depth); #endif goto r_switch; /* explore the remaining procs */ } else { #ifdef VERBOSE printf("%3ld: P_RAND -- none left\n", depth); #endif } #endif #ifdef BCS if (trpt->bcs & B_PHASE1) { trpt->bcs = B_PHASE2; /* start 2nd phase */ if (_n == 0 || !(trpt->tau&64)) /* pre-move unexecutable or led to stackstate */ { trpt->bcs |= B_FORCED; /* forced switch */ } #ifdef VERBOSE printf("%3ld: BCS move to phase 2, _n=%d %s\n", depth, _n, (trpt->bcs & B_FORCED)?"forced":"free"); #endif From = FROM_P; To = UPTO_P; goto c_switch; } if (_n == 0 /* no process could move */ && II >= BASE /* not the never claim */ && trpt->sched_limit >= sched_max) { _n = 1; #ifdef VERBOSE printf("%3ld: BCS not a deadlock\n", depth); #endif } #endif #ifndef NOFAIR /* Fairness: undo Rule 2 */ if (trpt->o_pm&32) /* remains if proc blocked */ { #ifdef VERI if (now._cnt[now._a_t&1] == 1) now._cnt[now._a_t&1] = 2; #endif now._cnt[now._a_t&1] += 1; #ifdef VERBOSE printf("%3ld: proc -- fairness ", depth); printf("undo Rule 2, cnt=%d, _a_t=%d\n", now._cnt[now._a_t&1], now._a_t); #endif trpt->o_pm &= ~32; 
} #ifndef NP if (fairness && _n == 0 /* nobody moved */ #ifdef VERI && !(trpt->tau&4) /* in program move */ #endif && !(trpt->tau&8) /* not an atomic one */ #ifdef ETIM && (trpt->tau&1) /* already tried timeout */ #endif #ifndef NOREDUCE /* see below */ && !((trpt->tau&32) && (_n == 0 || (trpt->tau&16))) #endif && now._cnt[now._a_t&1] > 0) /* needed more procs */ { depth++; trpt++; trpt->o_pm |= 128 | ((trpt-1)->o_pm&(2|4)); trpt->bup.oval = now._cnt[now._a_t&1]; now._cnt[now._a_t&1] = 1; #ifdef VERI trpt->tau = 4; #else trpt->tau = 0; #endif From = FROM_P; To = UPTO_P; #if defined(VERBOSE) || defined(CHECK) printf("%3ld: fairness default move ", depth); printf("(all procs block)\n"); #endif goto Down; } #endif Q999: /* returns here with _n>0 when done */; if (trpt->o_pm&8) { now._a_t &= ~2; now._cnt[now._a_t&1] = 0; trpt->o_pm &= ~8; #ifdef VERBOSE printf("%3ld: fairness undo Rule 1, _a_t=%d\n", depth, now._a_t); #endif } if (trpt->o_pm&16) { now._a_t |= 2; now._cnt[now._a_t&1] = 1; trpt->o_pm &= ~16; #ifdef VERBOSE printf("%3ld: fairness undo Rule 3, _a_t=%d\n", depth, now._a_t); #endif } #endif #ifndef NOREDUCE #ifdef SAFETY #ifdef LOOPSTATE /* at least one move that was preselected at this */ /* level, blocked or was a loop control flow point */ if ((trpt->tau&32) && (_n == 0 || (trpt->tau&16))) #else /* preselected move - no successors outside stack */ if ((trpt->tau&32) && !(trpt->tau&64)) #endif { From = FROM_P; To = UPTO_P; /* undo From == To */ #ifdef DEBUG printf("%3ld: proc %d UnSelected (_n=%d, tau=%d)\n", depth, II+1, _n, trpt->tau); #endif _n = 0; trpt->tau &= ~(16|32|64); if (MORE_P) /* II already restored and updated */ { goto Resume; } else { goto Again; } } #else /* at least one move that was preselected at this */ /* level, blocked or truncated at the next level */ if ((trpt->tau&32) && (_n == 0 || (trpt->tau&16))) { #ifdef DEBUG printf("%3ld: proc %d UnSelected (_n=%d, tau=%d)\n", depth, II+1, (int) _n, trpt->tau); #endif if (a_cycles && 
(trpt->tau&16)) { if (!(now._a_t&1)) { #ifdef DEBUG printf("%3ld: setting proviso bit\n", depth); #endif #ifndef BITSTATE #ifdef MA #ifdef VERI (trpt-1)->proviso = 1; #else trpt->proviso = 1; #endif #else #ifdef VERI if ((trpt-1)->ostate) ((char *)&((trpt-1)->ostate->state))[0] |= 128; #else ((char *)&(trpt->ostate->state))[0] |= 128; #endif #endif #else #ifdef VERI if ((trpt-1)->ostate) (trpt-1)->ostate->proviso = 1; #else trpt->ostate->proviso = 1; #endif #endif From = FROM_P; To = UPTO_P; _n = 0; trpt->tau &= ~(16|32|64); goto Again; /* do full search */ } /* else accept reduction */ } else { From = FROM_P; To = UPTO_P; _n = 0; trpt->tau &= ~(16|32|64); if (MORE_P) /* II already updated */ { goto Resume; } else { goto Again; } } } #endif #endif if (_n == 0 || ((trpt->tau&4) && (trpt->tau&2))) { #ifdef DEBUG cpu_printf("%3ld: no move [II=%d, tau=%d, boq=%d]\n", depth, II, trpt->tau, boq); #endif #if SYNC /* ok if a rendez-vous fails: */ if (boq != -1) goto Done; #endif /* ok if no procs or we're at maxdepth */ if ((now._nr_pr == 0 && (!strict || qs_empty())) || depth >= maxdepth-1) goto Done; /* undo change from 5.2.3 */ if ((trpt->tau&8) && !(trpt->tau&4)) { trpt->tau &= ~(1|8); /* 1=timeout, 8=atomic */ From = FROM_P; To = UPTO_P; #ifdef DEBUG cpu_printf("%3ld: atomic step proc %d unexecutable\n", depth, II+1); #endif #ifdef VERI trpt->tau |= 4; /* switch to claim */ #endif goto AllOver; } #ifdef ETIM if (!(trpt->tau&1)) /* didn't try timeout yet */ { #ifdef VERI if (trpt->tau&4) { #ifndef NTIM if (trpt->tau&2) /* requested */ #endif { trpt->tau |= 1; trpt->tau &= ~2; #ifdef DEBUG cpu_printf("%d: timeout\n", depth); #endif goto Stutter; } } else { /* only claim can enable timeout */ if ((trpt->tau&8) && !((trpt-1)->tau&4)) /* blocks inside an atomic */ goto BreakOut; #ifdef DEBUG cpu_printf("%d: req timeout\n", depth); #endif (trpt-1)->tau |= 2; /* request */ #if NCORE>1 && defined(FULL_TRAIL) if (upto > 0) { Pop_Stack_Tree(); } #endif goto Up; } #else #ifdef 
DEBUG cpu_printf("%d: timeout\n", depth); #endif trpt->tau |= 1; goto Again; #endif } #endif #ifdef VERI BreakOut: #ifndef NOSTUTTER if (!(trpt->tau&4)) { trpt->tau |= 4; /* claim stuttering */ trpt->tau |= 128; /* stutter mark */ #ifdef DEBUG cpu_printf("%d: claim stutter\n", depth); #endif goto Stutter; } #else ; #endif #else if (!noends && !a_cycles && !endstate()) { depth--; trpt--; /* new 4.2.3 */ uerror("invalid end state"); depth++; trpt++; } #ifndef NOSTUTTER else if (a_cycles && (trpt->o_pm&2)) /* new 4.2.4 */ { depth--; trpt--; uerror("accept stutter"); depth++; trpt++; } #endif #endif } Done: if (!(trpt->tau&8)) /* not in atomic seqs */ { #ifndef MA #if defined(FULLSTACK) || defined(CNTRSTACK) #ifdef VERI if (boq == -1 && (((trpt->tau&4) && !(trpt->tau&128)) || ( (trpt-1)->tau&128))) #else if (boq == -1) #endif { #ifdef DEBUG2 #if defined(FULLSTACK) printf("%ld: zapping %u (%d)\n", depth, trpt->ostate, (trpt->ostate)?trpt->ostate->tagged:0); #endif #endif onstack_zap(); } #endif #else #ifdef VERI if (boq == -1 && (((trpt->tau&4) && !(trpt->tau&128)) || ( (trpt-1)->tau&128))) #else if (boq == -1) #endif { #ifdef DEBUG printf("%ld: zapping\n", depth); #endif onstack_zap(); #ifndef NOREDUCE if (trpt->proviso) g_store((char *) &now, vsize, 1); #endif } #endif #ifndef SAFETY if (_n != 0 #ifdef VERI /* --after-- a program-step, i.e., */ /* after backtracking a claim-step */ && (trpt->tau&4) /* with at least one running process */ /* unless in a stuttered accept state */ && ((now._nr_pr > 1) || (trpt->o_pm&2)) #endif && !(now._a_t&1)) { #ifndef NOFAIR if (fairness) { #ifdef VERBOSE cpu_printf("Consider check %d %d...\n", now._a_t, now._cnt[0]); #endif if ((now._a_t&2) /* A-bit */ && (now._cnt[0] == 1)) checkcycles(); } else #endif if (a_cycles && (trpt->o_pm&2)) checkcycles(); } #endif } if (depth > 0) { #if NCORE>1 && defined(FULL_TRAIL) if (upto > 0) { Pop_Stack_Tree(); } #endif goto Up; } } #else void new_state(void) { /* place holder */ } #endif void 
/*
 * spin_assert -- runtime support for Promela assert(expr).
 *  a          : value of the asserted expression (0 means violated)
 *  s          : source text of the expression, used in the message
 *  ii, tt, t  : process/state/transition of the generated calling
 *               convention (unused in this build, kept for interface)
 * Reports "assertion violated <s>" through uerror() unless assertions
 * were disabled at run time (-A sets noasserts).
 */
spin_assert(int a, char *s, int ii, int tt, Trans *t)
{	if (!a && !noasserts)
	{	char bad[1024];
		/* prefix "assertion violated " is 19 chars; the expression
		 * text is capped at 1000 chars so the total fits bad[1024] */
		strcpy(bad, "assertion violated ");
		if (strlen(s) > 1000)
		{	strncpy(&bad[19], (const char *) s, 1000);
			bad[1019] = '\0';
		} else
			strcpy(&bad[19], s);
		uerror(bad);
	}
}
#ifndef NOBOUNDCHECK
/* Boundcheck -- guard wrapped around array subscripts by the generated
 * code: flags x outside [0..y-1] as an assertion violation and returns
 * x unchanged so the call can be used in place of the index itself. */
int
Boundcheck(int x, int y, int a1, int a2, Trans *a3)
{	spin_assert((x >= 0 && x < y),
		"- invalid array index", a1, a2, a3);
	return x;
}
#endif
int do_hashgen = 0;	/* nonzero when a generated hash polynomial is in use */

/* wrap_stats -- end-of-run state-space statistics: states stored and
 * matched, transitions, atomic steps, plus storage-mode specific data
 * (hash conflicts, hash factor, bfs disk traffic). */
void
wrap_stats(void)
{	if (nShadow>0)
	printf("%9.8g states, stored (%g visited)\n",
		nstates - nShadow, nstates);
	else
	printf("%9.8g states, stored\n", nstates);
#ifdef BFS_PAR
	if (bfs_punt > 0)
	printf("%9.8g states lost (lack of queue memory)\n",
		(double) bfs_punt);
#endif
#ifdef BFS
#if SYNC
	printf(" %8g nominal states (- rv and atomic)\n",
		nstates-midrv-nlinks+revrv);
	printf(" %8g rvs succeeded\n", midrv-failedrv);
#else
	printf(" %8g nominal states (stored-atomic)\n", nstates-nlinks);
#endif
#ifdef DEBUG
	printf(" %8g midrv\n", midrv);
	printf(" %8g failedrv\n", failedrv);
	printf(" %8g revrv\n", revrv);
#endif
#endif
	printf("%9.8g states, matched\n", truncs);
#ifdef CHECK
	printf("%9.8g matches within stack\n",truncs2);
#endif
	if (nShadow>0)
	printf("%9.8g transitions (= visited+matched)\n",
		nstates+truncs);
	else
	printf("%9.8g transitions (= stored+matched)\n",
		nstates+truncs);
	printf("%9.8g atomic steps\n", nlinks);
	if (nlost) printf("%g lost messages\n", (double) nlost);
#ifndef BITSTATE
#ifndef MA
	printf("hash conflicts: %9.8g (resolved)\n", hcmp);
#if !defined(AUTO_RESIZE) && !defined(BFS_PAR)
	/* NOTE(review): the next statement is corrupted in this copy --
	 * "(1< 1.0)" appears to have lost an angle-bracketed span
	 * (presumably "<<ssize))" plus several following lines, including
	 * the declarations of fp and fi). Preserved byte-for-byte; verify
	 * against the unmodified Spin 6.5.2 generator output. */
	if (hcmp > (double) (1< 1.0)
	{	fp = 100. / fp;
		while (fp > 2.)
		{	fi++;
			fp /= 2.;
		}
		if (fi > 0)
		{	printf(" (hint: rerun with -w%d to reduce runtime)",
				ssize-fi);
		}
	}
	printf("\n");
	}
#endif
#endif
#else
#ifdef CHECK
	printf("%8g states allocated for dfs stack\n", ngrabs);
#endif
	if (udmem)
	printf("\nhash factor: %4g (best if > 100.)\n\n",
		(double)(((double) udmem) * 8.0) / (double) nstates);
	else
	printf("\nhash factor: %4g (best if > 100.)\n\n",
		((double)(((ulong)1)<<(ssize-10)) / (double) nstates) * 1024.0);
	printf("bits set per state: %u (-k%u)\n", hfns, hfns);
	if (do_hashgen)
	printf("hash polynomial used: 0x%.8x\n", HASH_CONST[HASH_NR]);
	if (s_rand != 12345)
	printf("random seed used: %u\n", (uint) (s_rand-1));
#endif
#if defined(BFS_DISK) && !defined(BFS_PAR)
	printf("bfs disk reads: %ld writes %ld -- diff %ld\n",
		bfs_dsk_reads, bfs_dsk_writes, bfs_dsk_writes-bfs_dsk_reads);
	if (bfs_dsk_read >= 0) (void) close(bfs_dsk_read);
	if (bfs_dsk_write >= 0) (void) close(bfs_dsk_write);
	(void) unlink("pan_bfs_dsk.tmp");
#endif
}

/* wrapup -- normal end of a verification run: prints the banner of
 * compiled-in search options, the search-mode summary, error count,
 * and the statistics / memory-usage report, then exits via pan_exit().
 * In multi-core or bfs-parallel builds, non-root workers exit early. */
void
wrapup(void)
{	double nr1, nr2, nr3 = 0.0, nr4, nr5 = 0.0;
#ifdef BFS_PAR
	if (who_am_i != 0)
	{	pan_exit(0);
	}
#endif
#if NCORE>1
	if (verbose) cpu_printf("wrapup -- %lu error(s)\n", errors);
	if (core_id != 0)	/* only core 0 prints the report */
	{
#ifdef USE_DISK
		void dsk_stats(void);
		dsk_stats();
#endif
		if (search_terminated != NULL)
		{	*search_terminated |= 2; /* wrapup */
		}
		exit(0); /* normal termination, not an error */
	}
#endif
#if !defined(WIN32) && !defined(WIN64)
	signal(SIGINT, SIG_DFL);	/* restore default interrupt handling */
#endif
	printf("\n(%s)\n", SpinVersion);
	if (!done) printf("Warning: Search not completed\n");
#if defined(BFS_PAR) && !defined(BITSTATE)
	if (bfs_punt > 0)
	printf("Warning: Search incomplete\n");
#endif
#ifdef SC
	(void) unlink((const char *)stackfile);
#endif
	/* banner of compiled-in options */
#ifdef BFS_PAR
	printf(" + Multi-Core (using %d cores)\n", Cores);
#ifdef BFS_SEP_HASH
	printf(" + Separate Hash Tables\n");
#endif
#ifdef BFS_DISK
	printf(" + Disk storage\n");
#endif
#endif
#if NCORE>1
	if (a_cycles)
	{	printf(" + Multi-Core (NCORE=%d)\n", NCORE);
	} else
	{	printf(" + Multi-Core (NCORE=%d -z%ld)\n",
			NCORE, z_handoff);
	}
#endif
#ifdef BFS
	printf(" + Breadth-First Search\n");
#endif
#ifndef NOREDUCE
	printf(" + Partial Order Reduction\n");
#endif
#ifdef PERMUTED
	printf(" + Process Scheduling Permutation\n");
#endif
#ifdef P_REVERSE
	printf(" + Reverse Depth-First Search Order\n");
#endif
	if (t_reverse)
	printf(" + Reverse Transition Ordering\n");
#ifdef T_RAND
	printf(" + Randomized Transition Ordering\n");
#endif
#ifdef P_RAND
	printf(" + Randomized Process Ordering\n");
#endif
#ifdef BCS
	printf(" + Scheduling Restriction (-L%d)\n", sched_max);
#endif
#ifdef TRIX
	printf(" + Tree Index Compression\n");
#endif
#ifdef COLLAPSE
	printf(" + Compression\n");
#endif
#ifdef MA
	printf(" + Graph Encoding (-DMA=%d)\n", MA);
#ifdef R_XPT
	printf(" Restarted from checkpoint %s.xpt\n", PanSource);
#endif
#endif
#ifdef CHECK
#ifdef FULLSTACK
	printf(" + FullStack Matching\n");
#endif
#ifdef CNTRSTACK
	printf(" + CntrStack Matching\n");
#endif
#endif
#ifdef PERMUTED
	if (reversing & 2)
	{	if (p_reorder == set_permuted)
		{	printf(" + Permuted\n");
		}
		if (p_reorder == set_reversed)
		{	printf(" + Reversed\n");
		}
		if (p_reorder == set_rotated)
		{	printf(" + Rotated %d\n", p_rotate);
		}
		if (p_reorder == set_randrot)
		{	printf(" + RandRotated\n");
		}
	}
#endif
	/* search-mode header */
#ifdef BITSTATE
	printf("\nBit statespace search for:\n");
#else
#ifdef HC
	printf("\nHash-Compact %d search for:\n", HC);
#else
	printf("\nFull statespace search for:\n");
#endif
#endif
#ifdef EVENT_TRACE
#ifdef NEGATED_TRACE
	printf(" notrace assertion +\n");
#else
	printf(" trace assertion +\n");
#endif
#endif
#ifdef VERI
	printf(" never claim +");
	printf(" (%s)\n", procname[((Pclaim *)pptr(0))->_t]);
	printf(" assertion violations ");
	if (noasserts)
	printf("- (disabled by -A flag)\n");
	else
	printf("+ (if within scope of claim)\n");
#else
#ifdef NOCLAIM
	printf(" never claim - (not selected)\n");
#else
	printf(" never claim - (none specified)\n");
#endif
	printf(" assertion violations ");
	if (noasserts)
	printf("- (disabled by -A flag)\n");
else printf("+\n"); #endif #ifndef SAFETY #ifdef NP printf(" non-progress cycles "); #else printf(" acceptance cycles "); #endif if (a_cycles) printf("+ (fairness %sabled)\n", fairness?"en":"dis"); else printf("- (not selected)\n"); #else #if !defined(BFS_PAR) || !defined(L_BOUND) printf(" cycle checks - (disabled by -DSAFETY)\n"); #else printf(" cycle checks + (bound %d)\n", L_bound); #endif #endif #ifdef VERI printf(" invalid end states - "); printf("(disabled by "); if (noends) printf("-E flag)\n\n"); else printf("never claim)\n\n"); #else printf(" invalid end states "); if (noends) printf("- (disabled by -E flag)\n\n"); else printf("+\n\n"); #endif printf("State-vector %d byte, depth reached %ld", hmax, #if NCORE>1 (nr_handoffs * z_handoff) + #endif mreached); printf(", errors: %lu\n", errors); fflush(stdout); #ifdef MA if (done) { extern void dfa_stats(void); if (maxgs+a_cycles+2 < MA) printf("MA stats: -DMA=%d is sufficient\n", maxgs+a_cycles+2); dfa_stats(); } #endif wrap_stats(); #ifdef CHECK printf("stackframes: %d/%d\n\n", smax, svmax); printf("stats: fa %ld, fh %ld, zh %ld, zn %ld - ", Fa, Fh, Zh, Zn); printf("check %ld holds %ld\n", Ccheck, Cholds); printf("stack stats: puts %ld, probes %ld, zaps %ld\n", PUT, PROBE, ZAPS); #else printf("\n"); #endif #if !defined(BITSTATE) && defined(NOCOMP) if (!verbose) { goto jump_here; } #endif #if 1 nr1 = (nstates-nShadow)* (double)(hmax+sizeof(H_el)-sizeof(unsigned)); #ifdef BFS nr2 = 0.0; #else nr2 = (double) ((maxdepth+3)*sizeof(Trail)); #endif #ifndef BITSTATE #if !defined(MA) || defined(COLLAPSE) nr3 = (double) (ONE_L<1 && !defined(SEP_STATE) tmp_nr -= ((double) NCORE * LWQ_SIZE) + GWQ_SIZE; #endif if (tmp_nr < 0.0) tmp_nr = 0.; printf("Stats on memory usage (in Megabytes):\n"); printf("%9.3f equivalent memory usage for states", nr1/1048576.); /* 1024*1024=1048576 */ printf(" (stored*(State-vector + overhead))\n"); #if NCORE>1 && !defined(WIN32) && !defined(WIN64) printf("%9.3f shared memory reserved for state 
storage\n", mem_reserved/1048576.); #ifdef SEP_HEAP printf(" in %d local heaps of %7.3f MB each\n", NCORE, mem_reserved/(NCORE*1048576.)); #endif printf("\n"); #endif #ifdef BITSTATE if (udmem) printf("%9.3f memory used for hash array (-M%ld)\n", nr3/1048576., udmem/(1024L*1024L)); else printf("%9.3f memory used for hash array (-w%d)\n", nr3/1048576., ssize); if (nr5 > 0.0) printf("%9.3f memory used for bit stack\n", nr5/1048576.); remainder = remainder - nr3 - nr5; #else #ifndef USE_TDH printf("%9.3f actual memory usage for states", tmp_nr/1048576.); remainder -= tmp_nr; if (tmp_nr > 0.) { if (tmp_nr < nr1) { printf(" (compression: %.2f%%)\n", (100.0*tmp_nr)/nr1); } else { printf("\n"); } } else { printf(" (less than 1k)\n"); } #ifndef MA if (tmp_nr > 0. && tmp_nr < nr1) { printf(" state-vector as stored = %.0f byte", (tmp_nr)/(nstates-nShadow) - (double) (sizeof(H_el) - sizeof(unsigned))); printf(" + %ld byte overhead\n", (long int) sizeof(H_el)-sizeof(unsigned)); } #endif #endif #if !defined(MA) || defined(COLLAPSE) #ifdef BFS_PAR printf("%9.3f shared memory used for hash table (-w%d)\n", ((double) bfs_pre_allocated)/1048576., ssize); #else printf("%9.3f memory used for hash table (-w%d)\n", nr3/1048576., ssize); remainder -= nr3; #endif #endif #endif #ifndef BFS printf("%9.3f memory used for DFS stack (-m%ld)\n", nr2/1048576., maxdepth); remainder -= nr2; #endif #if NCORE>1 remainder -= ((double) NCORE * LWQ_SIZE) + GWQ_SIZE; printf("%9.3f shared memory used for work-queues\n", (GWQ_SIZE + (double) NCORE * LWQ_SIZE) /1048576.); printf(" in %d queues of %7.3f MB each", NCORE, (double) LWQ_SIZE /1048576.); #ifndef NGQ printf(" + a global q of %7.3f MB\n", (double) GWQ_SIZE / 1048576.); #else printf("\n"); #endif #endif if (remainder - fragment > 1048576.) { printf("%9.3f other (proc and chan stacks)\n", (remainder-fragment)/1048576.); } if (fragment > 1048576.) 
{ printf("%9.3f memory lost to fragmentation\n", fragment/1048576.); } #ifdef BFS_PAR printf("%9.3f total non-shared memory usage\n\n", memcnt/1048576.); #else printf("%9.3f total actual memory usage\n\n", memcnt/1048576.); #endif } #ifndef MA else #endif #endif #if !defined(BITSTATE) && defined(NOCOMP) jump_here: #endif #ifndef MA printf("%9.3f memory usage (Mbyte)\n", memcnt/1048576.); #endif #ifdef BFS_PAR bfs_report_mem(); #else printf("\n"); #endif #ifdef COLLAPSE printf("nr of templates: [ 0:globals 1:chans 2:procs ]\n"); printf("collapse counts: [ "); { int i; for (i = 0; i < 256+2; i++) if (ncomps[i] != 0) printf("%d:%lu ", i, ncomps[i]); printf("]\n"); } #endif #ifdef TRIX if (verbose) { int i; printf("TRIX counts:\n"); printf(" processes: "); for (i = 0; i < MAXPROC; i++) if (_p_count[i] != 0) { printf("%3d:%ld ", i, _p_count[i]); } printf("\n channels : "); for (i = 0; i < MAXQ; i++) if (_c_count[i] != 0) { printf("%3d:%ld ", i, _c_count[i]); } printf("\n\n"); } #endif if ((done || verbose) && !no_rck) do_reach(); #ifdef PEG { int i; printf("\nPeg Counts (transitions executed):\n"); for (i = 1; i < NTRANS; i++) { if (peg[i]) putpeg(i, peg[i]); } } #endif #ifdef VAR_RANGES dumpranges(); #endif #ifdef SVDUMP if (vprefix > 0) close(svfd); #endif #ifdef LOOPSTATE printf("%g loopstates hit\n", cnt_loops); #endif #ifdef NSUCC dump_succ(); #endif #if NCORE>1 && defined(T_ALERT) crash_report(); #endif #ifndef BFS_PAR pan_exit(0); #endif } void stopped(int arg) { #ifdef BFS_PAR bfs_shutdown("interrupted"); #endif printf("Interrupted\n"); #if NCORE>1 was_interrupted = 1; #endif wrapup(); pan_exit(0); } /* * super fast hash, based on Paul Hsieh's function * http://www.azillionmonkeys.com/qed/hash.html */ #include #undef get16bits #if defined(__GNUC__) && defined(__i386__) #define get16bits(d) (*((const uint16_t *) (d))) #else #define get16bits(d) ((((uint32_t)(((const uint8_t *)(d))[1])) << 8)\ +(uint32_t)(((const uint8_t *)(d))[0]) ) #endif void d_sfh(uchar *s, 
int len)	/* continuation of: void d_sfh(uchar *s, ... */
{	uint32_t h = len, tmp; int rem;

	/* Paul Hsieh's SuperFastHash over the len bytes at s;
	 * the 32-bit result is stored in the global K1
	 * (declared elsewhere, presumably pan.h)
	 */
	rem = len & 3;	/* nr of trailing bytes (0..3) */
	len >>= 2;	/* nr of full 4-byte groups */

	for ( ; len > 0; len--)
	{	h    += get16bits(s);
		tmp   = (get16bits(s+2) << 11) ^ h;
		h     = (h << 16) ^ tmp;
		s    += 2*sizeof(uint16_t);
		h    += h >> 11;
	}
	switch (rem) {	/* fold in the last 1..3 bytes */
	case 3:	h += get16bits(s);
		h ^= h << 16;
		h ^= s[sizeof(uint16_t)] << 18;
		h += h >> 11;
		break;
	case 2:	h += get16bits(s);
		h ^= h << 11;
		h += h >> 17;
		break;
	case 1:	h += *s;
		h ^= h << 10;
		h += h >> 1;
		break;
	}
	/* final avalanche of the remaining bits */
	h ^= h << 3;  h += h >> 5;
	h ^= h << 4;  h += h >> 17;
	h ^= h << 25; h += h >> 6;

	K1 = h;
}

#if WS>4
/* 64-bit Jenkins hash, 1997
 * http://burtleburtle.net/bob/c/lookup8.c
 */
#define mix(a,b,c) \
	{ a -= b; a -= c; a ^= (c>>43); \
	  b -= c; b -= a; b ^= (a<<9); \
	  c -= a; c -= b; c ^= (b>>8); \
	  a -= b; a -= c; a ^= (c>>38); \
	  b -= c; b -= a; b ^= (a<<23); \
	  c -= a; c -= b; c ^= (b>>5); \
	  a -= b; a -= c; a ^= (c>>35); \
	  b -= c; b -= a; b ^= (a<<49); \
	  c -= a; c -= b; c ^= (b>>11); \
	  a -= b; a -= c; a ^= (c>>12); \
	  b -= c; b -= a; b ^= (a<<18); \
	  c -= a; c -= b; c ^= (b>>22); \
	}
#else
/* 32-bit Jenkins hash, 2006
 * http://burtleburtle.net/bob/c/lookup3.c
 */
#define rot(x,k) (((x)<<(k))|((x)>>(32-(k))))

#define mix(a,b,c) \
	{ a -= c; a ^= rot(c, 4); c += b; \
	  b -= a; b ^= rot(a, 6); a += c; \
	  c -= b; c ^= rot(b, 8); b += a; \
	  a -= c; a ^= rot(c,16); c += b; \
	  b -= a; b ^= rot(a,19); a += c; \
	  c -= b; c ^= rot(b, 4); b += a; \
	}
#define final(a,b,c) \
	{ c ^= b; c -= rot(b,14); \
	  a ^= c; a -= rot(c,11); \
	  b ^= a; b -= rot(a,25); \
	  c ^= b; c -= rot(b,16); \
	  a ^= c; a -= rot(c,4); \
	  b ^= a; b -= rot(a,14); \
	  c ^= b; c -= rot(b,24); \
	}
#endif

/* d_hash: Jenkins lookup hash over the nbytes bytes at kb.
 * NOTE(review): the buffer is zero-padded IN PLACE up to the next
 * word boundary, so kb must have writable room past nbytes --
 * confirm against the state-vector allocation.
 */
void
d_hash(uchar *kb, int nbytes)
{	uint8_t *bp;
#if WS>4
	uint64_t a = 0, b, c, n;
	const uint64_t *k = (uint64_t *) kb;
#else
	uint32_t a = 0, b, c, n;
	const uint32_t *k = (uint32_t *) kb;
#endif
	n = nbytes/WS;	/* nr of words */
	/* extend to multiple of words, if needed */
	a = WS - (nbytes % WS);
	if (a > 0 && a < WS)
	{	n++;
		bp = kb + nbytes;
		switch (a) {
#if WS>4
		case 7: *bp++ = 0; /* fall thru */
		case 6: *bp++ = 
0; /* fall thru */ case 5: *bp++ = 0; /* fall thru */ case 4: *bp++ = 0; /* fall thru */ #endif case 3: *bp++ = 0; /* fall thru */ case 2: *bp++ = 0; /* fall thru */ case 1: *bp = 0; case 0: break; } } #if WS>4 b = HASH_CONST[HASH_NR]; c = 0x9e3779b97f4a7c13LL; /* arbitrary value */ while (n >= 3) { a += k[0]; b += k[1]; c += k[2]; mix(a,b,c); n -= 3; k += 3; } c += (((uint64_t) nbytes)<<3); switch (n) { case 2: b += k[1]; case 1: a += k[0]; case 0: break; } mix(a,b,c); #else a = c = 0xdeadbeef + (n<<2); b = HASH_CONST[HASH_NR]; while (n > 3) { a += k[0]; b += k[1]; c += k[2]; mix(a,b,c); n -= 3; k += 3; } switch (n) { case 3: c += k[2]; case 2: b += k[1]; case 1: a += k[0]; final(a,b,c); case 0: break; } #endif j1_spin = c&nmask; j3_spin = a&7; /* 1st bit */ j2_spin = b&nmask; j4_spin = (a>>3)&7; /* 2nd bit */ K1 = c; K2 = b; } #if defined(MURMUR) && (WS==8) /* public-domain, 64-bit MurmurHash3, by Austin Appleby */ /* https://code.google.com/p/smhasher/wiki/MurmurHash3 */ void m_hash(uchar *v, int len) { uint8_t *bp, *data = (uint8_t*) v; int i, nblocks = len / 16; uint64_t h1 = HASH_CONST[HASH_NR]; uint64_t h2 = 0x9e3779b97f4a7c13LL; uint64_t c1 = 0x87c37b91114253d5; uint64_t c2 = 0x4cf5ad432745937f; uint64_t *blocks = (uint64_t *)(data); /* guarantee a multiple of 16 bytes */ i = 16 - (len % 16); if (i > 0 && i < 16) { nblocks++; bp = v + len; switch (i) { case 15: *bp++ = 0; /* fall thru */ case 14: *bp++ = 0; case 13: *bp++ = 0; case 12: *bp++ = 0; case 11: *bp++ = 0; case 10: *bp++ = 0; case 9: *bp++ = 0; case 8: *bp++ = 0; case 7: *bp++ = 0; case 6: *bp++ = 0; case 5: *bp++ = 0; case 4: *bp++ = 0; case 3: *bp++ = 0; case 2: *bp++ = 0; case 1: *bp = 0; case 0: break; } } for (i = 0; i < nblocks; i++) { uint64_t k1 = blocks[i*2]; uint64_t k2 = blocks[i*2+1]; k1 *= c1; k1 = (k1 << 31) | (k1 >> 33); k1 *= c2; h1 ^= k1; h1 = (h1 << 27) | (h1 >> 37); h1 += h2; h1 = h1 * 5 + 0x52dce729; k2 *= c2; k2 = (k2 << 33) | (k2 >> 31); k2 *= c1; h2 ^= k2; h2 = (h2 << 31) | 
(h2 >> 33); h2 += h1; h2 = h2 * 5 + 0x38495ab5; } uint8_t *tail = (uint8_t*)(data + (nblocks * 16)); uint64_t k1 = 0; uint64_t k2 = 0; switch(len & 15) { case 15: k2 ^= ((uint64_t) tail[14]) << 48; break; case 14: k2 ^= ((uint64_t) tail[13]) << 40; break; case 13: k2 ^= ((uint64_t) tail[12]) << 32; break; case 12: k2 ^= ((uint64_t) tail[11]) << 24; break; case 11: k2 ^= ((uint64_t) tail[10]) << 16; break; case 10: k2 ^= ((uint64_t) tail[ 9]) << 8; break; case 9: k2 ^= ((uint64_t) tail[ 8]) << 0; break; k2 *= c2; k2 = (k2 << 33) | (k2 >> 31); k2 *= c1; h2 ^= k2; break; case 8: k1 ^= ((uint64_t) tail[7]) << 56; break; case 7: k1 ^= ((uint64_t) tail[6]) << 48; break; case 6: k1 ^= ((uint64_t) tail[5]) << 40; break; case 5: k1 ^= ((uint64_t) tail[4]) << 32; break; case 4: k1 ^= ((uint64_t) tail[3]) << 24; break; case 3: k1 ^= ((uint64_t) tail[2]) << 16; break; case 2: k1 ^= ((uint64_t) tail[1]) << 8; break; case 1: k1 ^= ((uint64_t) tail[0]) << 0; break; k1 *= c1; k1 = (k1 << 31) | (k1 >> 33); k1 *= c2; h1 ^= k1; }; h1 ^= len; h2 ^= len; h1 += h2; h2 += h1; h1 ^= h1 >> 33; h1 *= 0xff51afd7ed558ccd; h1 ^= h1 >> 33; h1 *= 0xc4ceb9fe1a85ec53; h1 ^= h1 >> 33; h2 ^= h2 >> 33; h2 *= 0xff51afd7ed558ccd; h2 ^= h2 >> 33; h2 *= 0xc4ceb9fe1a85ec53; h2 ^= h2 >> 33; h1 += h2; h2 += h1; j1_spin = h1&nmask; j3_spin = (h1>>48)&7; j2_spin = h2&nmask; j4_spin = (h2>>48)&7; K1 = h1; K2 = h2; } #endif void s_hash(uchar *cp, int om) { hasher(cp, om); /* sets K1 */ #ifdef BITSTATE if (S_Tab == H_tab) j1_spin = K1 % omaxdepth; else #endif if (ssize < 8*WS) j1_spin = K1&mask; else j1_spin = K1; } #ifndef RANDSTOR int *prerand; void inirand(void) { int i; srand(s_rand+HASH_NR); prerand = (int *) emalloc((omaxdepth+3)*sizeof(int)); for (i = 0; i < omaxdepth+3; i++) { prerand[i] = rand(); } } int pan_rand(void) { if (!prerand) inirand(); return prerand[depth]; } #endif void set_masks(void) { if (WS == 4 && ssize >= 32) { mask = 0xffffffff; #ifdef BITSTATE switch (ssize) { case 34: nmask = 
(mask>>1); break; case 33: nmask = (mask>>2); break; default: nmask = (mask>>3); break; } #else nmask = mask; #endif } else if (WS == 8) { mask = ((ONE_L<>3; #else nmask = mask; #endif } else if (WS != 4) { fprintf(stderr, "pan: wordsize %ld not supported\n", (long int) WS); exit(1); } else /* WS == 4 and ssize < 32 */ { mask = ((ONE_L<>3); } } #if defined(AUTO_RESIZE) && !defined(BITSTATE) && !defined(MA) #if NCORE>1 #error cannot combine AUTO_RESIZE with NCORE>1 #endif static long reclaim_size; static char *reclaim_mem; static H_el **N_tab; void reverse_capture(H_el *p) { if (!p) return; reverse_capture(p->nxt); /* last element of list moves first */ /* to preserve list-order */ j2_spin = p->m_K1; if (ssize < 8*WS) /* probably always true */ { j2_spin &= mask; } p->nxt = N_tab[j2_spin]; N_tab[j2_spin] = p; } void resize_hashtable(void) { #ifndef BFS_PAR if (WS == 4 && ssize >= 27 - 1) #endif { return; /* cannot increase further */ } ssize += 2; /* 4x size @htable ssize */ printf("pan: resizing hashtable to -w%d.. 
", ssize); N_tab = (H_el **) emalloc((ONE_L<1 int find_claim(char *s) { int i, j; for (i = 0; strncmp(procname[i], ":np_:", 5) != 0; i++) { if (strcmp(s, procname[i]) == 0) { for (j = 0; j < NCLAIMS; j++) { if (spin_c_typ[j] == i) { return j; } } break; } } printf("pan: error: cannot find claim '%s'\n", s); exit(1); return -1; /* unreachable */ } #endif #if defined(BFS_PAR) && defined(BFS_SEP_HASH) int /* to avoid having to include and compile with -lm */ blog2(int n) /* n >= 1 */ { int m=1, r=2; if (n == 1) { return 0; } if (n == 2) { return 1; } while (n > r) { m++; r *= 2; } return m; } #endif uint pp[33]; uint mul(uint a, uint b, uint p) { int c = 0; while (a) { if (a&1) { a ^= 1; c ^= b; } a = (a>>1); if (b & 0x80000000) { b += b; b ^= p; } else { b += b; } } return c; } uint ppow(int n, uint p) { uint t = 1; int i; for (i = 0; i < 32; i++) { if (n & (1<0) s_rand = T_RAND; #elif defined(P_RAND) && (P_RAND>0) s_rand = P_RAND; #endif #ifdef PUTPID { char *ptr = strrchr(argv[0], '/'); if (ptr == NULL) { ptr = argv[0]; } else { ptr++; } progname = emalloc(strlen(ptr)); strcpy(progname, ptr); /* printf("progname: %s\n", progname); */ } #endif #ifdef BITSTATE b_store = bstore_reg; /* default */ #endif { int j; strcpy(o_cmdline, ""); if (strlen(argv[0]) < sizeof(o_cmdname)) { strcpy(o_cmdname, argv[0]); } for (j = 1; j < argc; j++) { strcat(o_cmdline, argv[j]); strcat(o_cmdline, " "); } /* printf("Command Line: %s\n", o_cmdline); */ if (strlen(o_cmdline) >= sizeof(o_cmdline)) { Uerror("option list too long"); } } while (argc > 1 && argv[1][0] == '-') { switch (argv[1][1]) { #ifndef SAFETY #ifdef NP case 'a': fprintf(efd, "warning: -a is disabled by -DNP, ignored\n"); break; #else case 'a': a_cycles = 1; break; #endif #else #if defined(BFS_PAR) && defined(L_BOUND) case 'a': if (isdigit(argv[1][2])) { L_bound = atoi(&argv[1][2]); if (L_bound < 1 || L_bound > 255) { printf("usage: -aN with 00 case 'C': coltrace = 1; goto samething; #endif #endif case 'c': upto = 
atoi(&argv[1][2]); break; case 'D': dodot++; state_tables++; break; case 'd': state_tables++; break; case 'e': every_error = 1; upto = 0; Nr_Trails = 1; break; case 'E': noends = 1; break; #ifdef SC case 'F': if (strlen(argv[1]) > 2) stackfile = &argv[1][2]; break; #endif #if !defined(SAFETY) && !defined(NOFAIR) case 'f': fairness = 1; break; #endif #ifdef HAS_CODE #if HAS_CODE>0 case 'g': gui = 1; goto samething; #endif #endif case 'h': if (strncmp(&argv[1][1], "hash", strlen("hash")) == 0) { do_hashgen = 1; break; } if (!argv[1][2] || !isdigit((int) argv[1][2])) { usage(efd); /* exits */ } HASH_NR = atoi(&argv[1][2])%(sizeof(HASH_CONST)/sizeof(uint)); break; case 'I': iterative = 2; every_error = 1; break; case 'i': if (strncmp(&argv[1][1], "i_reverse", strlen("i_reverse")) == 0) { reversing |= 1; } else { iterative = 1; every_error = 1; } break; case 'J': like_java = 1; break; /* Klaus Havelund */ #ifdef BITSTATE case 'k': hfns = atoi(&argv[1][2]); break; #endif #ifdef BCS case 'L': sched_max = atoi(&argv[1][2]); if (sched_max > 255) /* stored as one byte */ { fprintf(efd, "warning: using max bound (255)\n"); sched_max = 255; } #ifndef NOREDUCE if (sched_max == 0) { fprintf(efd, "warning: with (default) bound -L0, "); fprintf(efd, "using -DNOREDUCE performs better\n"); } #endif break; #endif #ifndef SAFETY #ifdef NP case 'l': a_cycles = 1; break; #else case 'l': fprintf(efd, "error: -l not available (compile with -DNP)"); usage(efd); break; #endif #endif #ifdef BITSTATE case 'M': udmem = atoi(&argv[1][2]); break; case 'G': udmem = atoi(&argv[1][2]); udmem *= 1024; break; #else case 'M': case 'G': fprintf(stderr, "-M and -G affect only -DBITSTATE\n"); break; #endif case 'm': maxdepth = atoi(&argv[1][2]); break; #ifndef NOCLAIM case 'N': #if NCLAIMS>1 if (isdigit((int)argv[1][2])) { whichclaim = atoi(&argv[1][2]); } else if (isalpha((int)argv[1][2])) { claimname = &argv[1][2]; } else if (argc > 2 && argv[2][0] != '-') /* check next arg */ { claimname = argv[2]; 
argc--; argv++; /* skip next arg */ } #else #if NCLAIMS==1 fprintf(stderr, "warning: only one claim defined, -N ignored\n"); #else fprintf(stderr, "warning: no claims defined, -N ignored\n"); #endif if (!isdigit((int)argv[1][2]) && argc > 2 && argv[2][0] != '-') { argc--; argv++; } #endif #endif break; case 'n': no_rck = 1; break; case 'P': if (!readtrail && isdigit((int) argv[1][2])) { int x = atoi(&argv[1][2]); if (x != 0 && x != 1) { fprintf(efd, "pan: bad option -P[01], ignored\n"); } if (x == 0) { reversing &= ~1; break; } if (x == 1) { reversing |= 1; break; } if (verbose) fprintf(efd, "pan: reversed *active* process creation %s\n", reversing&1?"on":"off"); break; } /* else */ #ifdef HAS_CODE #if HAS_CODE>0 readtrail = 1; onlyproc = atoi(&argv[1][2]); if (argc > 2 && argv[2][0] != '-') /* check next arg */ { trailfilename = argv[2]; argc--; argv++; /* skip next arg */ } #else fprintf(efd, "pan: option -P not recognized, ignored\n"); #endif #else fprintf(efd, "pan: option -P not recognized, ignored\n"); #endif break; case 'p': #if !defined(BFS) && !defined(BFS_PAR) #ifdef PERMUTED if (strncmp(&argv[1][1], "p_normal", strlen("p_normal")) == 0) { reversing &= ~2; break; } reversing |=2; if (strncmp(&argv[1][1], "p_permute", strlen("p_permute")) == 0) { p_reorder = set_permuted; break; } if (strncmp(&argv[1][1], "p_rotate", strlen("p_rotate")) == 0) { p_reorder = set_rotated; if (isdigit((int) argv[1][9])) { p_rotate = atoi(&argv[1][9]); } else { p_rotate = 1; } break; } if (strncmp(&argv[1][1], "p_randrot", strlen("p_randrot")) == 0) { p_reorder = set_randrot; break; } if (strncmp(&argv[1][1], "p_reverse", strlen("p_reverse")) == 0) { p_reorder = set_reversed; break; } #else if (strncmp(&argv[1][1], "p_permute", strlen("p_permute")) == 0 || strncmp(&argv[1][1], "p_rotate", strlen("p_rotate")) == 0 || strncmp(&argv[1][1], "p_randrot", strlen("p_randrot")) == 0 || strncmp(&argv[1][1], "p_reverse", strlen("p_reverse")) == 0) { fprintf(efd, "option %s required 
compilation with -DPERMUTED\n", argv[1]); exit(1); } #endif #endif #ifdef SVDUMP vprefix = atoi(&argv[1][2]); #else fprintf(efd, "invalid option '%s' -- ignored\n", argv[1]); #endif break; #if NCORE==1 case 'Q': quota = (double) 60.0 * (double) atoi(&argv[1][2]); #ifndef FREQ freq /= 10.; /* for better resolution */ #endif break; #endif case 'q': strict = 1; break; case 'R': if (argv[1][2] == 'S') /* e.g., -RS76842 */ { s_rand = atoi(&argv[1][3]); break; } #ifdef BITSTATE Nrun = atoi(&argv[1][2]); if (Nrun > 100) { Nrun = 100; } else if (Nrun < 1) { Nrun = 1; } #else usage(efd); break; #endif case 'r': if (strncmp(&argv[1][1], "rhash", strlen("rhash")) == 0) { if (s_rand == 12345) /* default seed */ { #if defined(WIN32) || defined(WIN64) s_rand = (uint) clock(); #else struct tms dummy_tm; s_rand = (uint) times(&dummy_tm); #endif } srand(s_rand++); #ifdef PERMUTED do_hashgen = 1; switch (rand()%5) { case 0: p_reorder = set_permuted; reversing |=2; break; case 1: p_reorder = set_reversed; reversing |=2; break; /* fully randomize p_rotate: */ case 2: p_reorder = set_randrot; reversing |=2; break; /* choose once, then keep p_rotate fixed: */ case 3: p_reorder = set_rotated; p_rotate = rand()%3; reversing |=2; break; default: /* standard search */ break; } if (rand()%2 == 0) { t_reverse = 1; } break; #else fprintf(efd, "option -rhash requires compilation with -DPERMUTED\n"); exit(1); #endif } #if defined(HAS_CODE) && HAS_CODE>0 samething: readtrail = 1; if (isdigit((int)argv[1][2])) whichtrail = atoi(&argv[1][2]); else if (argc > 2 && argv[2][0] != '-') /* check next arg */ { trailfilename = argv[2]; argc--; argv++; /* skip next arg */ } break; case 'S': silent = 1; goto samething; #else fprintf(efd, "options -r is for models with embedded C code\n"); break; #endif case 'T': if (isdigit((int) argv[1][2])) { t_reverse = atoi(&argv[1][2]); if (verbose) printf("pan: reverse transition ordering %s\n", t_reverse?"on":"off"); break; } TMODE = 0444; break; case 't': if 
(strncmp(&argv[1][1], "t_reverse", strlen("t_reverse")) == 0) { t_reverse = 1; break; } if (argv[1][2]) { tprefix = &argv[1][2]; } break; case 'u': #ifdef BFS_PAR ncores = atoi(&argv[1][2]); #endif break; case 'V': start_timer(); printf("Generated by %s\n", SpinVersion); to_compile(); pan_exit(2); break; case 'v': verbose++; break; case 'w': ssize = atoi(&argv[1][2]); #if defined(BFS_PAR) && defined(BFS_SEP_HASH) used_w = 1; #endif break; case 'Y': signoff = 1; break; case 'X': efd = stdout; break; case 'x': exclusive = 1; break; #if NCORE>1 /* -B ip is passthru to proxy of remote ip address: */ case 'B': argc--; argv++; break; case 'Q': worker_pids[0] = atoi(&argv[1][2]); break; /* -Un means that the nth worker should be instantiated as a proxy */ case 'U': proxy_pid = atoi(&argv[1][2]); break; /* -W means this copy is started by a cluster-server as a remote */ /* this flag is passed to ./pan_proxy, which interprets it */ case 'W': remote_party++; break; case 'Z': core_id = atoi(&argv[1][2]); if (verbose) { printf("cpu%d: pid %d parent %d\n", core_id, getpid(), worker_pids[0]); } break; case 'z': z_handoff = atoi(&argv[1][2]); break; #else case 'z': break; /* ignored for single-core */ #endif default : fprintf(efd, "saw option -%c\n", argv[1][1]); usage(efd); break; } argc--; argv++; } #if defined(BFS_PAR) && defined(BFS_SEP_HASH) if (used_w == 0) { if (ncores == 0) /* all cores used, by default */ { ssize -= blog2(BFS_MAXPROCS - 1); } else { ssize -= blog2(ncores); } } #endif if (do_hashgen) { hashgen(); } #ifndef SAFETY if (fairness && !a_cycles) { fprintf(efd, "error: -f requires the use of -a or -l\n"); usage(efd); } #if ACCEPT_LAB==0 if (a_cycles) { fprintf(efd, "warning: no accept labels are defined, "); fprintf(efd, "so option -a has no effect (ignored)\n"); a_cycles = 0; } #endif #endif #ifdef BFS_PAR uerror = bfs_uerror; Uerror = bfs_Uerror; #else uerror = dfs_uerror; Uerror = dfs_Uerror; #endif if (ssize <= 32) /* 6.2.0 */ { hasher = d_sfh; #if 
!defined(BITSTATE) && defined(USE_TDH) o_hash = o_hash32; #endif } else { hasher = d_hash; #if !defined(BITSTATE) && defined(USE_TDH) o_hash = o_hash64; #endif } if (iterative && TMODE != 0666) { TMODE = 0666; fprintf(efd, "warning: -T ignored when -i or -I is used\n"); } #if defined(WIN32) || defined(WIN64) #ifndef _S_IWRITE #define S_IWRITE 0000200 /* write permission, owner */ #endif #ifndef _S_IREAD #define S_IREAD 0000400 /* read permission, owner */ #endif if (TMODE == 0666) TMODE = S_IWRITE | S_IREAD; else TMODE = S_IREAD; #endif #if NCORE>1 store_proxy_pid = proxy_pid; /* for checks in mem_file() and someone_crashed() */ if (core_id != 0) { proxy_pid = 0; } #ifndef SEP_STATE if (core_id == 0 && a_cycles) { fprintf(efd, "hint: this search may be more efficient "); fprintf(efd, "if pan.c is compiled -DSEP_STATE\n"); } #endif if (z_handoff < 0) { z_handoff = 20; /* conservative default - for non-liveness checks */ } #if defined(NGQ) || defined(LWQ_FIXED) LWQ_SIZE = (double) (128.*1048576.); #else LWQ_SIZE = (double) ( z_handoff + 2.) 
* (double) sizeof(SM_frame); #endif #if NCORE>2 if (a_cycles) { fprintf(efd, "warning: the intended nr of cores to be used in liveness mode is 2\n"); #ifndef SEP_STATE fprintf(efd, "warning: without -DSEP_STATE there is no guarantee that all liveness violations are found\n"); #endif } #endif #ifdef HAS_HIDDEN #error cannot use hidden variables when compiling multi-core #endif #endif #if defined(T_RAND) && defined(ELSE_IN_GUARD) #error cannot hide 'else' as guard in d_step, when using -DT_RAND #endif #ifdef BITSTATE if (hfns <= 0) { hfns = 1; fprintf(efd, "warning: using -k%d as minimal usable value\n", hfns); } #endif omaxdepth = maxdepth; #ifdef BITSTATE if (WS == 4 && ssize > 34) { ssize = 34; fprintf(efd, "warning: using -w%d as max usable value\n", ssize); /* * -w35 would not work: 35-3 = 32 but 1^31 is the largest * power of 2 that can be represented in an ulong */ } #else if (WS == 4 && ssize > 27) { ssize = 27; fprintf(efd, "warning: using -w%d as max usable value\n", ssize); /* * for emalloc, the lookup table size multiplies by 4 for the pointers * the largest power of 2 that can be represented in a ulong is 1^31 * hence the largest number of lookup table slots is 31-4 = 27 */ } #endif #ifdef SC hiwater = HHH = maxdepth-10; DDD = HHH/2; if (!stackfile) { stackfile = (char *) emalloc(strlen(PanSource)+4+1); sprintf(stackfile, "%s._s_", PanSource); } if (iterative) { fprintf(efd, "error: cannot use -i or -I with -DSC\n"); pan_exit(1); } #endif #if (defined(R_XPT) || defined(W_XPT)) && !defined(MA) #warning -DR_XPT and -DW_XPT assume -DMA (ignored) #endif if (iterative && a_cycles) fprintf(efd, "warning: -i or -I work for safety properties only\n"); #ifdef BFS #ifdef SC #error -DBFS not compatible with -DSC #endif #ifdef HAS_LAST #error -DBFS not compatible with _last #endif #ifdef HAS_STACK #error cannot use c_track UnMatched with BFS #endif #ifdef BCS #error -DBFS not compatible with -DBCS #endif #ifdef REACH #warning -DREACH is redundant when -DBFS is used 
#endif #endif #ifdef TRIX #ifdef BITSTATE #error cannot combine -DTRIX and -DBITSTATE #endif #ifdef COLLAPSE #error cannot combine -DTRIX and -DCOLLAPSE #endif #ifdef MA #error cannot combine -DTRIX and -DMA #endif #if defined(BFS_PAR) && defined(BFS_SEP_HEAP) #error cannot combined -DBFS_SEP_HEAP with -DTRIX #endif #endif #ifdef BFS_PAR #ifdef NP #error cannot combine -DBFS_PAR and -DNP #undef NP #endif #endif #ifdef NOCLAIM #ifdef NP #warning using -DNP overrides -DNOCLAIM #undef NOCLAIM #endif #endif #ifdef BCS #ifdef P_RAND #error cannot combine -DBCS and -DP_RAND #endif #ifdef BFS #error cannot combine -DBCS and -DBFS #endif #endif #if defined(MERGED) && defined(PEG) #error to use -DPEG use: spin -o3 -a #endif #if defined(HC) && !defined(BFS_PAR) #ifdef NOCOMP #error cannot combine -DHC and -DNOCOMP #endif #ifdef BITSTATE #error cannot combine -DHC and -DBITSTATE #endif #endif #if defined(SAFETY) && defined(NP) #error cannot combine -DNP and -DBFS or -DSAFETY #endif #ifdef MA #ifdef BITSTATE #error cannot combine -DMA and -DBITSTATE #endif #if MA <= 0 #error usage: -DMA=N with N > 0 and N < VECTORSZ #endif #ifndef NOREDUCE if (a_cycles) { fprintf(stderr, "warning: liveness checking with -DMA "); fprintf(stderr, "but without -DNOREDUCE may be incomplete\n"); } #endif #endif #ifdef COLLAPSE #ifdef BITSTATE #error cannot combine -DBITSTATE and -DCOLLAPSE #endif #ifdef NOCOMP #error cannot combine -DCOLLAPSE and -DNOCOMP #endif #endif if (maxdepth <= 0 || ssize <= 1) usage(efd); #if SYNC>0 && !defined(NOREDUCE) if (a_cycles && fairness) { fprintf(efd, "error: p.o. reduction not compatible with "); fprintf(efd, "fairness (-f) in models\n"); fprintf(efd, " with rendezvous operations: "); fprintf(efd, "recompile with -DNOREDUCE\n"); pan_exit(1); } #endif #if defined(REM_VARS) && !defined(NOREDUCE) #warning p.o. 
reduction not compatible with remote varrefs (use -DNOREDUCE) #endif #if defined(NOCOMP) && !defined(BITSTATE) if (a_cycles) { fprintf(efd, "error: use of -DNOCOMP voids -l and -a\n"); pan_exit(1); } #endif #ifdef MEMLIM memlim = ((double) MEMLIM) * (double) (1<<20); /* size in Mbyte */ #endif #if SYNC>0 #ifdef HAS_PRIORITY #error use of priorities cannot be combined with rendezvous #elif HAS_ENABLED #error use of enabled() cannot be combined with rendezvous #endif #endif #ifndef NOREDUCE #ifdef HAS_PRIORITY #warning use of priorities requires -DNOREDUCE #elif HAS_ENABLED #error use of enabled() requires -DNOREDUCE #endif #ifdef HAS_PCVALUE #error use of pcvalue() requires -DNOREDUCE #endif #ifdef HAS_BADELSE #error use of 'else' combined with i/o stmnts requires -DNOREDUCE #endif #if defined(HAS_LAST) && !defined(BCS) #error use of _last requires -DNOREDUCE #endif #endif #if SYNC>0 && !defined(NOREDUCE) #ifdef HAS_UNLESS fprintf(efd, "warning: use of a rendezvous stmnts in the escape\n"); fprintf(efd, " of an unless clause, if present, could make p.o. 
reduction\n"); fprintf(efd, " invalid (use -DNOREDUCE to avoid this)\n"); #ifdef BFS fprintf(efd, " (this type of rv is also not compatible with -DBFS)\n"); #endif #endif #endif #if SYNC>0 && defined(BFS) if (!noends) fprintf(efd, "warning: use of rendezvous with BFS does not preserve all invalid endstates\n"); #endif #if !defined(REACH) && !defined(BITSTATE) if (iterative != 0 && a_cycles == 0) { fprintf(efd, "warning: -i and -I need -DREACH to work accurately\n"); } #endif #if defined(BITSTATE) && defined(REACH) #warning -DREACH is voided by -DBITSTATE #endif #if defined(MA) && defined(REACH) #warning -DREACH is voided by -DMA #endif #if defined(FULLSTACK) && defined(CNTRSTACK) #error cannot combine -DFULLSTACK and -DCNTRSTACK #endif #if defined(VERI) #if ACCEPT_LAB>0 #ifndef BFS if (!a_cycles #ifdef HAS_CODE && !readtrail #endif #if NCORE>1 && core_id == 0 #endif && !state_tables) #ifdef NP { fprintf(efd, "warning: non-progress claim "); fprintf(efd, "requires -l flag to fully verify\n"); #else { fprintf(efd, "warning: never claim + accept labels "); fprintf(efd, "requires -a flag to fully verify\n"); #endif } #else if (verbose && !state_tables #ifdef HAS_CODE && !readtrail #endif ) { fprintf(efd, "warning: verification in BFS mode "); fprintf(efd, "is restricted to safety properties\n"); } #endif #endif #endif #ifndef SAFETY #if 0 if (!a_cycles #ifdef HAS_CODE && !readtrail #endif #if NCORE>1 && core_id == 0 #endif && !state_tables) { fprintf(efd, "hint: this search is more efficient "); fprintf(efd, "if pan.c is compiled -DSAFETY\n"); } #endif #ifndef NOCOMP if (!a_cycles) { S_A = 0; } else { if (!fairness) { S_A = 1; /* _a_t */ #ifndef NOFAIR } else /* _a_t and _cnt[NFAIR] */ { S_A = (&(now._cnt[0]) - (uchar *) &now) + NFAIR - 2; /* -2 because first two uchars in now are masked */ #endif } } #endif #endif signal(SIGINT, stopped); set_masks(); #if defined(BFS) || defined(BFS_PAR) trail = (Trail *) emalloc(6*sizeof(Trail)); trail += 3; #else trail = (Trail *) 
emalloc((maxdepth+3)*sizeof(Trail)); trail++; /* protect trpt-1 refs at depth 0 */ #endif trpt = &trail[0]; /* precaution -- in case uerror is called early */ #ifdef BFS ntrpt = trpt; #endif #ifdef SVDUMP if (vprefix > 0) { char nm[64]; sprintf(nm, "%s.svd", PanSource); if ((svfd = creat(nm, TMODE)) < 0) { fprintf(efd, "couldn't create %s\n", nm); vprefix = 0; } } #endif #ifdef RANDSTOR srand(s_rand+HASH_NR); #endif #if SYNC>0 && ASYNC==0 set_recvs(); #endif run(); done = 1; wrapup(); return 0; } void usage(FILE *fd) { fprintf(fd, "%s\n", SpinVersion); fprintf(fd, "Valid Options are:\n"); #ifndef SAFETY #ifdef NP fprintf(fd, " -a -> is disabled by -DNP "); fprintf(fd, "(-DNP compiles for -l only)\n"); #else fprintf(fd, " -a find acceptance cycles\n"); #endif #else fprintf(fd, " -a,-l,-f -> are disabled by -DSAFETY\n"); #endif fprintf(fd, " -A ignore assert() violations\n"); fprintf(fd, " -b consider it an error to exceed the depth-limit\n"); fprintf(fd, " -cN stop at Nth error "); fprintf(fd, "(defaults to -c1)\n"); fprintf(fd, " -D print state tables in dot-format and stop\n"); fprintf(fd, " -d print state tables and stop\n"); fprintf(fd, " -e create trails for all errors\n"); fprintf(fd, " -E ignore invalid end states\n"); #ifdef SC fprintf(fd, " -Ffile use 'file' to store disk-stack\n"); #endif #ifndef NOFAIR fprintf(fd, " -f add weak fairness (to -a or -l)\n"); #endif fprintf(fd, " -hN use different hash-seed N:0..499 (defaults to -h0)\n"); fprintf(fd, " -hash generate a random hash-polynomial for -h0 (see also -rhash)\n"); fprintf(fd, " using a seed set with -RSn (default %u)\n", s_rand); fprintf(fd, " -i search for shortest path to error\n"); fprintf(fd, " -I like -i, but approximate and faster\n"); fprintf(fd, " -J reverse eval order of nested unlesses\n"); #ifdef BITSTATE fprintf(fd, " -kN set N bits per state (defaults to 3)\n"); #endif #ifdef BCS fprintf(fd, " -LN set scheduling restriction to N (default 0)\n"); #endif #ifndef SAFETY #ifdef NP fprintf(fd, 
" -l find non-progress cycles\n"); #else fprintf(fd, " -l find non-progress cycles -> "); fprintf(fd, "disabled, requires "); fprintf(fd, "compilation with -DNP\n"); #endif #endif #ifdef BITSTATE fprintf(fd, " -MN use N Megabytes for bitstate hash array\n"); fprintf(fd, " -GN use N Gigabytes for bitstate hash array\n"); #endif fprintf(fd, " -mN max depth N steps (default=10k)\n"); #if NCLAIMS>1 fprintf(fd, " -N cn -- use the claim named cn\n"); fprintf(fd, " -Nn -- use claim number n\n"); #endif fprintf(fd, " -n no listing of unreached states\n"); #ifdef PERMUTED fprintf(fd, " -p_permute randomize order in which processes are scheduled (see also -rhash)\n"); fprintf(fd, " -p_reverse reverse order in which processes are scheduled (see also -rhash)\n"); fprintf(fd, " -p_rotateN rotate by N the process scheduling order (see also -rhash)\n"); #endif #ifdef SVDUMP fprintf(fd, " -pN create svfile (save N bytes per state)\n"); #endif fprintf(fd, " -QN set time-limit on execution of N minutes\n"); fprintf(fd, " -q require empty chans in valid end states\n"); #ifdef HAS_CODE fprintf(fd, " -r read and execute trail - can add -v,-n,-PN,-g,-C\n"); fprintf(fd, " -r trailfilename read and execute trail in file\n"); fprintf(fd, " -rN read and execute N-th error trail\n"); fprintf(fd, " -C read and execute trail - columnated output (can add -v,-n)\n"); fprintf(fd, " -r -PN read and execute trail - restrict trail output to proc N\n"); fprintf(fd, " -g read and execute trail + msc gui support\n"); fprintf(fd, " -S silent replay: only user defined printfs show\n"); #endif fprintf(fd, " -RSn use randomization seed n\n"); fprintf(fd, " -rhash use random hash-polynomial and randomly choose -p_rotateN, -p_permute, or p_reverse\n"); #ifdef BITSTATE fprintf(fd, " -Rn run n times n: [1..100] using n "); fprintf(fd, " different hash functions\n"); #endif fprintf(fd, " -T create trail files in read-only mode\n"); fprintf(fd, " -t_reverse reverse order in which transitions are explored\n"); 
/*
 * NOTE(review): Spin-generated verifier runtime (pan.c, Spin 6.5.2).
 * This chunk was recovered from a whitespace-collapsed extraction:
 * line breaks (required for the preprocessor directives) have been
 * reinstated, but every token is unchanged.  Spans marked "GARBLED"
 * below lost text during extraction and will not compile as-is --
 * regenerate pan.c from the Promela source to recover them.  Do not
 * hand-edit logic in this generated file.
 */

/* tail of usage(): prints the remaining command-line option help, then exits */
fprintf(fd, " -tsuf replace .trail with .suf on trailfiles\n");
fprintf(fd, " -V print SPIN version number\n");
fprintf(fd, " -v verbose -- filenames in unreached state listing\n");
fprintf(fd, " -wN hashtable of 2^N entries ");
fprintf(fd, "(defaults to -w%d)\n", ssize);
fprintf(fd, " -x do not overwrite an existing trail file\n");
#if NCORE>1
fprintf(fd, " -zN handoff states below depth N to 2nd cpu (multi_core)\n");
#endif
#ifdef HAS_CODE
fprintf(fd, "\n options -r, -C, -PN, -g, and -S can optionally be followed by\n");
fprintf(fd, " a filename argument, as in '-r filename', naming the trailfile\n");
#endif
#if NCORE>1
multi_usage(fd);
#endif
exit(1);
}

/*
 * Malloc: checked wrapper around malloc().
 * Adds the request to the running total memcnt and enforces the
 * optional -DMEMLIM bound.  On failure it prints usage statistics and
 * compile-time hints for reducing memory, then calls wrapup() (fatal).
 */
char *
Malloc(ulong n)
{	char *tmp;
#ifdef MEMLIM
	if (memcnt + (double) n > memlim)
	{	printf("pan: reached -DMEMLIM bound\n");
		goto err;
	}
#endif
	tmp = (char *) malloc(n);
	if (!tmp)
	{
#ifdef BFS_PAR
		Uerror("out of non-shared memory");
#endif
		printf("pan: out of memory\n");
#ifdef MEMLIM
err:		printf(" %g bytes used\n", memcnt);
		printf(" %g bytes more needed\n", (double) n);
		printf(" %g bytes limit\n", memlim);
#endif
#ifdef COLLAPSE
		printf("hint: to reduce memory, recompile with\n");
#ifndef MA
		printf(" -DMA=%d # better/slower compression, or\n", hmax);
#endif
		printf(" -DBITSTATE # supertrace, approximation\n");
#else
#ifndef BITSTATE
		printf("hint: to reduce memory, recompile with\n");
#ifndef HC
		printf(" -DCOLLAPSE # good, fast compression, or\n");
#ifndef MA
		printf(" -DMA=%d # better/slower compression, or\n", hmax);
#endif
		printf(" -DHC # hash-compaction, approximation\n");
#endif
		printf(" -DBITSTATE # supertrace, approximation\n");
#endif
#endif
#if NCORE>1
#ifdef FULL_TRAIL
		printf(" omit -DFULL_TRAIL or use pan -c0 to reduce memory\n");
#endif
#ifdef SEP_STATE
		printf("hint: to reduce memory, recompile without\n");
		printf(" -DSEP_STATE # may be faster, but uses more memory\n");
#endif
#endif
		wrapup();
	}
	memcnt += (double) n;	/* only counted on success */
	return tmp;
}

#define CHUNK	(100*VECTORSZ)

/*
 * emalloc: bump-pointer arena allocator on top of Malloc().
 * Rounds n up to pointer-size alignment, grabs a new CHUNK when the
 * current arena runs out (abandoning the remainder, tallied in
 * `fragment`), and returns zero-filled storage.  As the original
 * comment says: blocks are never released or reallocated.
 */
char *
emalloc(ulong n) /* never released or reallocated */
{	char *tmp;
	if (n == 0)
		return (char *) NULL;
	if (n&(sizeof(void *)-1)) /* for proper alignment */
		n += sizeof(void *)-(n&(sizeof(void *)-1));
	if ((ulong) left < n)
	{	grow = (n < CHUNK) ? CHUNK : n;
		have = Malloc(grow);
		fragment += (double) left;	/* unused tail of old arena is lost */
		left = grow;
	}
	tmp = have;
	have += (long) n;
	left -= (long) n;
	memset(tmp, 0, n);
	return tmp;
}

/* dfs_Uerror: report an error via uerror(), stop any helper cpus /
 * bfs workers, and terminate through wrapup().  Always fatal. */
void
dfs_Uerror(char *str)
{	/* always fatal */
	uerror(str);
#if NCORE>1
	sudden_stop("Uerror");
#endif
#ifdef BFS_PAR
	bfs_shutdown("Uerror");
#endif
	wrapup();
}

#if defined(MA) && !defined(SAFETY)
/*
 * Unwind (minimized-automaton mode only): walk back down the trail,
 * reversing one transition per frame with do_reverse(), until the
 * state vector `now` again matches the snapshot taken into comp_now
 * at entry.  Returns the depth at which the match occurred (the start
 * of the accepting cycle), or 0 when the bottom of the trail is hit.
 * Used by dfs_uerror() to locate the cycle seed before trail writing,
 * since MA-compressed storage cannot be searched directly.
 */
int
Unwind(void)
{	Trans *t; uchar ot, _m; int tt; short II;
#ifdef VERBOSE
	int i;
#endif
	uchar oat = now._a_t;
	/* snapshot the target state with the 2nd-dfs bits cleared */
	now._a_t &= ~(1|16|32);
	memcpy((char *) &comp_now, (char *) &now, vsize);
	now._a_t = oat;
Up:
#ifdef SC
	trpt = getframe(depth);
#endif
#ifdef VERBOSE
	printf("%ld State: ", depth);
#if !defined(NOCOMP) && !defined(HC)
	for (i = 0; i < vsize; i++) printf("%d%s,", ((char *)&now)[i], Mask[i]?"*":"");
#else
	for (i = 0; i < vsize; i++) printf("%d,", ((char *)&now)[i]);
#endif
	printf("\n");
#endif
#ifndef NOFAIR
	if (trpt->o_pm&128) /* fairness alg */
	{	now._cnt[now._a_t&1] = trpt->bup.oval;
		depth--;
#ifdef SC
		trpt = getframe(depth);
#else
		trpt--;
#endif
		goto Q999;
	}
#endif
#ifdef HAS_LAST
#ifdef VERI
	{	long d; Trail *trl;
		now._last = 0;
		for (d = 1; d < depth; d++)
		{	trl = getframe(depth-d); /* was trl = (trpt-d); */
			if (trl->pr != 0)
			{	now._last = trl->pr - BASE;
				break;
		}	}
	}
#else
	now._last = (depth<1)?0:(trpt-1)->pr;
#endif
#endif
#ifdef EVENT_TRACE
	now._event = trpt->o_event;
#endif
	if ((now._a_t&1) && depth <= A_depth)
	{	now._a_t &= ~(1|16|32);
		if (fairness) now._a_t |= 2; /* ? */
		A_depth = 0;
		goto CameFromHere; /* checkcycles() */
	}
	t = trpt->o_t; ot = trpt->o_ot; II = trpt->pr;
	tt = trpt->o_tt; _this = pptr(II);
	_m = do_reverse(t, II, trpt->o_m);	/* undo the forward move */
#ifdef VERBOSE
	printf("%3ld: proc %d ", depth, II);
	printf("reverses %d, %d to %d,", t->forw, tt, t->st);
	printf(" %s [abit=%d,adepth=%ld,", t->tp, now._a_t, A_depth);
	printf("tau=%d,%d] \n", trpt->tau, (trpt-1)->tau);
#endif
	depth--;
#ifdef SC
	trpt = getframe(depth);
#else
	trpt--;
#endif
	/* reached[ot][t->st] = 1; 3.4.13 */
	((P0 *)_this)->_p = tt;	/* restore the process's local state */
#ifndef NOFAIR
	if ((trpt->o_pm&32))
	{
#ifdef VERI
		if (now._cnt[now._a_t&1] == 0)
			now._cnt[now._a_t&1] = 1;
#endif
		now._cnt[now._a_t&1] += 1;
	}
Q999:	if (trpt->o_pm&8)
	{	now._a_t &= ~2;
		now._cnt[now._a_t&1] = 0;
	}
	if (trpt->o_pm&16)
		now._a_t |= 2;
#endif
CameFromHere:
	if (memcmp((char *) &now, (char *) &comp_now, vsize) == 0)
		return depth;
	if (depth > 0) goto Up;
	return 0;
}
#endif

static char unwinding;	/* guards against recursive uerror during Unwind */

/*
 * dfs_uerror: record an error found during the depth-first search.
 * Prints the message (suppressing an immediate duplicate of the last
 * one via laststr), bumps the error count, and -- when -a/-e policy
 * (every_error/upto) says so -- writes a trailfile via putrail() (or
 * nuerror() under BFS).  Under MA, cycles are first located with
 * Unwind().  In -i/-I iterative mode the search depth bound is
 * reduced instead of stopping.  Terminates via wrapup() once `upto`
 * errors have been seen.
 */
void
dfs_uerror(char *str)
{	static char laststr[256];
	int is_cycle;
	if (unwinding) return;	/* 1.4.2 */
	/* note: the strncmp guards only the printf; strncpy runs always */
	if (strncmp(str, laststr, 254))
#if NCORE>1
	cpu_printf("pan:%lu: %s (at depth %ld)\n", errors+1, str,
#else
	printf("pan:%lu: %s (at depth %ld)\n", errors+1, str,
#endif
#if NCORE>1
		(nr_handoffs * z_handoff) +
#endif
		((depthfound == -1)?depth:depthfound));
	strncpy(laststr, str, 254);
	errors++;
#ifdef HAS_CODE
	if (readtrail) { wrap_trail(); return; }
#endif
	is_cycle = (strstr(str, " cycle") != (char *) 0);
	if (!is_cycle)
	{	depth++; trpt++;	/* include the offending state in the trail */
	}
	if ((every_error != 0) || errors == upto)
	{
#if defined(MA) && !defined(SAFETY)
		if (is_cycle)
		{	int od = depth;
			unwinding = 1;
			depthfound = Unwind();
			unwinding = 0;
			depth = od;
		}
#endif
#if NCORE>1
		writing_trail = 1;
#endif
#ifdef BFS
		if (depth > 1) trpt--;
		nuerror();
		if (depth > 1) trpt++;
#else
		putrail();
#endif
#if defined(MA) && !defined(SAFETY)
		if (strstr(str, " cycle"))
		{	if (every_error)
				printf("sorry: MA writes 1 trail max\n");
			wrapup(); /* no recovery from unwind */
		}
#endif
#if NCORE>1
		if (search_terminated != NULL)
		{	*search_terminated |= 4; /* uerror */
		}
		writing_trail = 0;
#endif
	}
	if (!is_cycle)
	{	depth--; trpt--; /* undo */
	}
#ifndef BFS
	if (iterative != 0 && maxdepth > 0)
	{	if (maxdepth > depth)
		{	maxdepth = (iterative == 1)?(depth+1):(depth/2);
		}
		warned = 1;
		printf("pan: reducing search depth to %ld\n", maxdepth);
	} else
#endif
	if (errors >= upto && upto != 0)
	{
#ifdef BFS_PAR
		bfs_shutdown("uerror"); /* no return */
#endif
#if NCORE>1
		sudden_stop("uerror");
#endif
		wrapup();
	}
	depthfound = -1;
}

/*
 * xrefsrc: print a source cross-reference ("file:line, state, label")
 * for an unreached automaton state i of proctype M, using the
 * state->source map mp.  Returns 1 when nothing was reported
 * (goto-only transitions), 0 when a line was printed.
 * NOTE(review): `sizeof(mp)` is the size of the POINTER (4 or 8), not
 * the number of S_F_MAP entries -- the bound on j looks unintended,
 * but it is what Spin generates; left untouched.
 */
int
xrefsrc(int lno, S_F_MAP *mp, int M, int i)
{	Trans *T; int j, retval=1;
	for (T = trans[M][i]; T; T = T->nxt)
		if (T && T->tp)
		{	if (strcmp(T->tp, ".(goto)") == 0 || strncmp(T->tp, "goto :", 6) == 0)
				return 1; /* not reported */
			for (j = 0; j < sizeof(mp); j++)
				if (i >= mp[j].from && i <= mp[j].upto)
				{	printf("\t%s:%d", mp[j].fnm, lno);
					break;
				}
			if (j >= sizeof(mp)) /* fnm not found in list */
			{	printf("\t%s:%d", PanSource, lno); /* use default */
			}
			printf(", state %d", i);
			if (strcmp(T->tp, "") != 0)
			{	char *q;
				q = transmognify(T->tp);
				printf(", \"%s\"", q?q:"");
			} else if (stopstate[M][i])
				printf(", -end state-");
			printf("\n");
			retval = 0; /* reported */
		}
	return retval;
}

/*
 * r_ck: report the unreached states of proctype M (N states, source
 * line table src, file map mp).  Skips never-claims other than the
 * selected one; prints one xrefsrc() line per truly-unreached state
 * and a final "(x of y states)" summary.
 */
void
r_ck(uchar *which, int N, int M, short *src, S_F_MAP *mp)
{	int i, m=0;
	if ((enum btypes) Btypes[M] == N_CLAIM && claimname != NULL
	&&  strcmp(claimname, procname[M]) != 0)
	{	return;
	}
	switch ((enum btypes) Btypes[M]) {
	case P_PROC:
	case A_PROC:
		printf("unreached in proctype %s\n", procname[M]);
		break;
	case I_PROC:
		printf("unreached in init\n");
		break;
	case E_TRACE:
	case N_TRACE:
	case N_CLAIM:
	default:
		printf("unreached in claim %s\n", procname[M]);
		break;
	}
	for (i = 1; i < N; i++)
	{	if (which[i] == 0
		&& (mapstate[M][i] == 0 || which[mapstate[M][i]] == 0))
		{	m += xrefsrc((int) src[i], mp, M, i);
		} else
		{	m++;
		}
	}
	printf(" (%d of %d states)\n", N-1-m, N-1);
}

#if NCORE>1 && !defined(SEP_STATE)
static long rev_trail_cnt;	/* running step number while writing a trail */
#ifdef FULL_TRAIL
/*
 * rev_trail (multi-core): recursively walk the shared Stack_Tree back
 * to the root, then emit "step:pid:t_id" trail lines in forward order
 * on the way out of the recursion.  pr == 255 marks a cpu handoff
 * point, written as "-1:-1:-1" when checking acceptance cycles.
 */
void
rev_trail(int fd, volatile Stack_Tree *st_tr)
{	long j; char snap[64];
	if (!st_tr)
	{	return;
	}
	rev_trail(fd, st_tr->prv);	/* parent first: forward order on unwind */
#ifdef VERBOSE
	printf("%d (%d) LRT [%d,%d] -- %9u (root %9u)\n",
		depth, rev_trail_cnt, st_tr->pr, st_tr->t_id, st_tr, stack_last[core_id]);
#endif
	if (st_tr->pr != 255)
	{	sprintf(snap, "%ld:%d:%d\n",
			rev_trail_cnt++, st_tr->pr, st_tr->t_id);
		j = strlen(snap);
		if (write(fd, snap, j) != j)
		{	printf("pan: error writing trailfile\n");
			close(fd);
			wrapup();
			return;
		}
	} else /* handoff point */
	{	if (a_cycles)
		{	(void) write(fd, "-1:-1:-1\n", 9);
	}	}
}
#endif
#endif

/*
 * putrail: write the counter-example trailfile.
 * Emits one "depth:pid:t_id" line per trail frame (plus mode-specific
 * header lines for VERI/MERGED/PERMUTED runs and a "-1:-1:-1" marker
 * at the cycle start).  In FULL_TRAIL multi-core mode the trail comes
 * from the shared stack tree via rev_trail() under the global lock.
 */
void
putrail(void)
{	int fd;
#if defined VERI || defined(MERGED)
	char snap[64];
#endif
#if NCORE==1 || defined(SEP_STATE) || !defined(FULL_TRAIL)
	long i, j;
	Trail *trl;
#endif
	fd = make_trail();
	if (fd < 0) return;
#ifdef VERI
	sprintf(snap, "-2:%d:-2\n", (uchar) ((P0 *)pptr(0))->_t);
	if (write(fd, snap, strlen(snap)) < 0) return;
#endif
#ifdef MERGED
	sprintf(snap, "-4:-4:-4\n");
	if (write(fd, snap, strlen(snap)) < 0) return;
#endif
#ifdef PERMUTED
	sprintf(snap, "-5:%d:%d\n", t_reverse, reversing&2);
	if (write(fd, snap, strlen(snap)) < 0) return;
	sprintf(snap, "-6:%d:%d\n", p_reorder==set_permuted, p_reorder==set_reversed);
	if (write(fd, snap, strlen(snap)) < 0) return;
	sprintf(snap, "-7:%d:%d\n", p_reorder==set_rotated, p_rotate);
	if (write(fd, snap, strlen(snap)) < 0) return;
	/* note: --s_rand is a side effect, compensating for a later use */
	sprintf(snap, "-8:%d:%d\n", p_reorder==set_randrot, --s_rand);
	if (write(fd, snap, strlen(snap)) < 0) return;
#endif
#if NCORE>1 && !defined(SEP_STATE) && defined(FULL_TRAIL)
	rev_trail_cnt = 1;
	enter_critical(GLOBAL_LOCK);
	rev_trail(fd, stack_last[core_id]);
	leave_critical(GLOBAL_LOCK);
#else
	i = 1; /* trail starts at position 1 */
#if NCORE>1 && defined(SEP_STATE)
	if (cur_Root.m_vsize > 0) { i++; depth++; }
#endif
	for ( ; i <= depth; i++)
	{	if (i == depthfound+1)
		{	if (write(fd, "-1:-1:-1\n", 9) != 9)
			{	goto notgood;
		}	}
		trl = getframe(i);
		if (!trl->o_t) continue;
		if (trl->o_pm&128) continue;	/* skip fairness bookkeeping frames */
		sprintf(snap, "%ld:%d:%d\n",
			i, trl->pr, trl->o_t->t_id);
		j = strlen(snap);
		if (write(fd, snap, j) != j)
		{	notgood: printf("pan: error writing trailfile\n");
			close(fd);
			wrapup();
	}	}
#endif
	close(fd);
#if NCORE>1
	cpu_printf("pan: wrote trailfile\n");
#endif
}

/*
 * sv_save: push a full copy of the state vector `now` onto the save
 * stack (svtack), growing or replacing the next slot when the current
 * vector is larger than what that slot was allocated for.
 * NOTE(review): replaced slot bodies come from emalloc and are never
 * freed -- consistent with the arena allocator's no-release design.
 */
void
sv_save(void) /* push state vector onto save stack */
{	if (!svtack->nxt)
	{	svtack->nxt = (Svtack *) emalloc(sizeof(Svtack));
		svtack->nxt->body = emalloc(vsize*sizeof(char));
		svtack->nxt->lst = svtack;
		svtack->nxt->m_delta = vsize;
		svmax++;
	} else if (vsize > svtack->nxt->m_delta)
	{	svtack->nxt->body = emalloc(vsize*sizeof(char));
		svtack->nxt->lst = svtack;
		svtack->nxt->m_delta = vsize;
		svmax++;
	}
	svtack = svtack->nxt;
#if SYNC
	svtack->o_boq = boq;	/* preserve rendezvous channel state */
#endif
#ifdef TRIX
	sv_populate();
#endif
	svtack->o_delta = vsize; /* don't compress */
	memcpy((char *)(svtack->body), (char *) &now, vsize);
#if defined(C_States) && defined(HAS_STACK) && (HAS_TRACK==1)
	c_stack((uchar *) &(svtack->c_stack[0]));
#endif
#ifdef DEBUG
	cpu_printf("%d: sv_save\n", depth);
#endif
}

/*
 * sv_restor: pop the most recently saved state vector from the save
 * stack back into `now`; inverse of sv_save().  Uerror()s when the
 * saved size does not match or the stack underflows.
 */
void
sv_restor(void) /* pop state vector from save stack */
{	memcpy((char *)&now, svtack->body, svtack->o_delta);
#if SYNC
	boq = svtack->o_boq;
#endif
#ifdef TRIX
	re_populate();
#endif
#if defined(C_States) && (HAS_TRACK==1)
#ifdef HAS_STACK
	c_unstack((uchar *) &(svtack->c_stack[0]));
#endif
	c_revert((uchar *) &(now.c_state[0]));
#endif
	if (vsize != svtack->o_delta)
		Uerror("sv_restor");
	if (!svtack->lst)
		Uerror("error: sv_restor");
	svtack = svtack->lst;
#ifdef DEBUG
	cpu_printf(" sv_restor\n");
#endif
}

/*
 * p_restor: undo a process deletion -- restore process h (its body,
 * offset/skip bookkeeping, Mask bits, _pid) from the backtrack stack,
 * then restore any channels that were deleted with it (o_delqs) via
 * q_restor().  Inverse of delproc(..., sav=1).
 */
void
p_restor(int h)
{	int i;
	char *z = (char *) &now;
#ifdef BFS_PAR
	bfs_prepmask(1); /* p_restor */
#endif
#ifndef TRIX
	proc_offset[h] = stack->o_offset;
	proc_skip[h] = (uchar) stack->o_skip;
#else
	char *oi;
#ifdef V_TRIX
	printf("%4d: p_restor %d\n", depth, h);
#endif
#endif
#ifndef XUSAFE
	p_name[h] = stack->o_name;
#endif
#ifdef TRIX
	vsize += sizeof(char *);
#ifndef BFS
	if (processes[h] != NULL || freebodies == NULL)
	{	Uerror("processes error");
	}
	processes[h] = freebodies;
	freebodies = freebodies->nxt;
	processes[h]->nxt = (TRIX_v6 *) 0;
	processes[h]->modified = 1; /* p_restor */
#endif
	processes[h]->parent_pid = stack->parent;
	processes[h]->psize = stack->o_delta;
	memcpy((char *)pptr(h), stack->b_ptr, stack->o_delta);
	oi = stack->b_ptr;
#else
#if !defined(NOCOMP) && !defined(HC)
	for (i = vsize + stack->o_skip; i > vsize; i--)
		Mask[i-1] = 1; /* align */
#endif
	vsize += stack->o_skip;
	memcpy(z+vsize, stack->body, stack->o_delta);
	vsize += stack->o_delta;
#if !defined(NOCOMP) && !defined(HC)
	for (i = 1; i <= Air[((P0 *)pptr(h))->_t]; i++)
		Mask[vsize - i] = 1; /* pad */
	Mask[proc_offset[h]] = 1; /* _pid */
#endif
	if (BASE > 0 && h > 0)
		((P0 *)pptr(h))->_pid = h-BASE;
	else
		((P0 *)pptr(h))->_pid = h;
#ifdef BFS_PAR
	bfs_fixmask(1); /* p_restor */
#endif
#endif
	now._nr_pr += 1;
#ifndef NOVSZ
	now._vsz = vsize;
#endif
	i = stack->o_delqs;
	if (!stack->lst)
		Uerror("error: p_restor");
	stack = stack->lst;
	_this = pptr(h);
	while (i-- > 0)
		q_restor();	/* bring back channels deleted with this proc */
#ifdef TRIX
	re_mark_all(1); /* p_restor - all chans move up in _ids_ */
	now._ids_[h] = oi; /* restor the original contents */
#endif
}

/*
 * q_restor: undo a channel deletion -- restore the topmost deleted
 * channel (index now._nr_qs) from the backtrack stack, including its
 * offset/skip bookkeeping and Mask bits.  Inverse of delq(sav=1).
 */
void
q_restor(void)
{	int h = now._nr_qs;
#ifdef TRIX
#ifdef V_TRIX
	printf("%4d: q_restor %d\n", depth, h);
#endif
	vsize += sizeof(char *);
#ifndef BFS
	if (channels[h] != NULL || freebodies == NULL)
	{	Uerror("channels error");
	}
	channels[h] = freebodies;
	freebodies = freebodies->nxt;
	channels[h]->nxt = (TRIX_v6 *) 0;
	channels[h]->modified = 1; /* q_restor */
#endif
	channels[h]->parent_pid = stack->parent;
	channels[h]->psize = stack->o_delta;
	memcpy((char *)qptr(h), stack->b_ptr, stack->o_delta);
	now._ids_[now._nr_pr + h] = stack->b_ptr;
#else
	char *z = (char *) &now;
#ifndef NOCOMP
	int k, k_end;
#endif
#ifdef BFS_PAR
	bfs_prepmask(2); /* q_restor */
#endif
	q_offset[h] = stack->o_offset;
	q_skip[h] = (uchar) stack->o_skip;
	vsize += stack->o_skip;
	memcpy(z+vsize, stack->body, stack->o_delta);
	vsize += stack->o_delta;
#endif
#ifndef XUSAFE
	q_name[h] = stack->o_name;
#endif
#ifndef NOVSZ
	now._vsz = vsize;
#endif
	now._nr_qs += 1;
#ifndef TRIX
#if !defined(NOCOMP) && !defined(HC)
	k_end = stack->o_offset;
	k = k_end - stack->o_skip;
#if SYNC
#ifndef BFS
	if (q_zero(now._nr_qs)) k_end += stack->o_delta;
#endif
#endif
	for ( ; k < k_end; k++)
		Mask[k] = 1;
#endif
#ifdef BFS_PAR
	bfs_fixmask(2); /* q_restor */
#endif
#endif
	if (!stack->lst)
		Uerror("error: q_restor");
	stack = stack->lst;
}

/* Recycling free-lists for small int arrays, bucketed by length (< 512). */
typedef struct IntChunks {
	int *ptr;
	struct IntChunks *nxt;
} IntChunks;
IntChunks *filled_chunks[512];	/* reusable arrays, per length */
IntChunks *empty_chunks[512];	/* spare list nodes, per length */

/* grab_ints: get a zeroed-or-recycled int array of length nr,
 * reusing a previously ungrabbed one when available. */
int *
grab_ints(int nr)
{	IntChunks *z;
	if (nr >= 512) Uerror("cannot happen grab_int");
	if (filled_chunks[nr])
	{	z = filled_chunks[nr];
		filled_chunks[nr] = filled_chunks[nr]->nxt;
	} else
	{	z = (IntChunks *) emalloc(sizeof(IntChunks));
		z->ptr = (int *) emalloc(nr * sizeof(int));
	}
	z->nxt = empty_chunks[nr];
	empty_chunks[nr] = z;
	return z->ptr;
}

/* ungrab_ints: return array p of length nr to the recycling list. */
void
ungrab_ints(int *p, int nr)
{	IntChunks *z;
	if (!empty_chunks[nr]) Uerror("cannot happen ungrab_int");
	z = empty_chunks[nr];
	empty_chunks[nr] = empty_chunks[nr]->nxt;
	z->ptr = p;
	z->nxt = filled_chunks[nr];
	filled_chunks[nr] = z;
}

/*
 * delproc: delete process h -- only legal when it is the most recently
 * created one (h+1 == _nr_pr), otherwise returns 0.  First deletes the
 * channels created by that process (counted in i), then removes the
 * process body from the state vector; when sav != 0 everything needed
 * to undo the deletion is pushed on the backtrack stack for p_restor().
 * Returns 1 on success.
 */
int
delproc(int sav, int h)
{	int d, i=0;
#ifndef NOCOMP
	int o_vsize = vsize;
#endif
	if (h+1 != (int) now._nr_pr)
	{	return 0;
	}
#ifdef TRIX
#ifdef V_TRIX
	printf("%4d: delproc %d -- parent %d\n",
		depth, h, processes[h]->parent_pid);
	if (now._nr_qs > 0)
	printf(" top channel: %d -- parent %d\n",
		now._nr_qs-1, channels[now._nr_qs-1]->parent_pid);
#endif
	while (now._nr_qs > 0
	&& channels[now._nr_qs-1]->parent_pid == processes[h]->parent_pid)
	{	delq(sav);
		i++;
	}
	d = processes[h]->psize;
	if (sav)
	{	if (!stack->nxt)
		{	stack->nxt = (_Stack *) emalloc(sizeof(_Stack));
			stack->nxt->lst = stack;
			smax++;
		}
		stack = stack->nxt;
#ifndef XUSAFE
		stack->o_name = p_name[h];
#endif
		stack->parent = processes[h]->parent_pid;
		stack->o_delta = d;
		stack->o_delqs = i;
		stack->b_ptr = now._ids_[h];
	}
	memset((char *)pptr(h), 0, d);
#ifndef BFS
	processes[h]->nxt = freebodies;
	freebodies = processes[h];
	processes[h] = (TRIX_v6 *) 0;
#endif
	vsize -= sizeof(char *);
	now._nr_pr -= 1;
	re_mark_all(-1); /* delproc - all chans move down in _ids_ */
#else
	while (now._nr_qs
	&& q_offset[now._nr_qs-1] > proc_offset[h])
	{	delq(sav);
		i++;
	}
	d = vsize - proc_offset[h];
	if (sav)
	{	if (!stack->nxt)
		{	stack->nxt = (_Stack *) emalloc(sizeof(_Stack));
			stack->nxt->body = emalloc(Maxbody * sizeof(char));
			stack->nxt->lst = stack;
			smax++;
		}
		stack = stack->nxt;
		stack->o_offset = proc_offset[h];
#if VECTORSZ>32000
		stack->o_skip = (int) proc_skip[h];
#else
		stack->o_skip = (short) proc_skip[h];
#endif
#ifndef XUSAFE
		stack->o_name = p_name[h];
#endif
		stack->o_delta = d;
		stack->o_delqs = i;
		memcpy(stack->body, (char *)pptr(h), d);
	}
	vsize = proc_offset[h];
	now._nr_pr -= 1;
	memset((char *)pptr(h), 0, d);
	vsize -= (int) proc_skip[h];
#if !defined(NOCOMP) && !defined(HC)
#ifdef BFS_PAR
	bfs_prepmask(3); /* delproc - no chance in proc_offset or proc_skip */
#endif
	for (i = vsize; i < o_vsize; i++)
		Mask[i] = 0; /* reset */
#ifdef BFS_PAR
	bfs_fixmask(3); /* delproc */
#endif
#endif
#endif
#ifndef NOVSZ
	now._vsz = vsize;
#endif
	return 1;
}

/*
 * delq: delete the topmost channel (index _nr_qs-1) from the state
 * vector; when sav != 0 its contents and bookkeeping are pushed on the
 * backtrack stack so q_restor() can undo the deletion.
 */
void
delq(int sav)
{	int h = now._nr_qs - 1;
#ifdef TRIX
	int d = channels[now._nr_qs - 1]->psize;
#else
	int d = vsize - q_offset[now._nr_qs - 1];
#endif
#ifndef NOCOMP
	int k, o_vsize = vsize;
#endif
	if (sav)
	{	if (!stack->nxt)
		{	stack->nxt = (_Stack *) emalloc(sizeof(_Stack));
#ifndef TRIX
			stack->nxt->body = emalloc(Maxbody * sizeof(char));
#endif
			stack->nxt->lst = stack;
			smax++;
		}
		stack = stack->nxt;
#ifdef TRIX
		stack->parent = channels[h]->parent_pid;
		stack->b_ptr = now._ids_[h];
#else
		stack->o_offset = q_offset[h];
#if VECTORSZ>32000
		stack->o_skip = (int) q_skip[h];
#else
		stack->o_skip = (short) q_skip[h];
#endif
#endif
#ifndef XUSAFE
		stack->o_name = q_name[h];
#endif
		stack->o_delta = d;
#ifndef TRIX
		memcpy(stack->body, (char *)qptr(h), d);
#endif
	}
#ifdef TRIX
	vsize -= sizeof(char *);
#ifdef V_TRIX
	printf("%4d: delq %d parent %d\n",
		depth, h, channels[h]->parent_pid);
#endif
#else
	vsize = q_offset[h];
	vsize -= (int) q_skip[h];
#if !defined(NOCOMP) && !defined(HC)
#ifdef BFS_PAR
	bfs_prepmask(3); /* delq - no change in q_offset or q_skip */
#endif
	for (k = vsize; k < o_vsize; k++)
		Mask[k] = 0; /* reset */
#ifdef BFS_PAR
	bfs_fixmask(3); /* delq */
#endif
#endif
#endif
	now._nr_qs -= 1;
	memset((char *)qptr(h), 0, d);
#ifdef TRIX
#ifndef BFS
	channels[h]->nxt = freebodies;
	freebodies = channels[h];
	channels[h] = (TRIX_v6 *) 0;
#endif
#endif
#ifndef NOVSZ
	now._vsz = vsize;
#endif
}

/* qs_empty: 1 iff every channel in the current state is empty. */
int
qs_empty(void)
{	int i;
	for (i = 0; i < (int) now._nr_qs; i++)
	{	if (q_sz(i) > 0)
			return 0;
	}
	return 1;
}

/*
 * endstate: 1 iff the current state is a valid end state -- every
 * process is in a declared end state; with -q (strict) all channels
 * must also be empty; an incomplete event trace also disqualifies.
 */
int
endstate(void)
{	int i; P0 *ptr;
	for (i = BASE; i < (int) now._nr_pr; i++)
	{	ptr = (P0 *) pptr(i);
		if (!stopstate[ptr->_t][ptr->_p])
			return 0;
	}
	if (strict) return qs_empty();
#if defined(EVENT_TRACE)
	if (!stopstate[EVENT_TRACE][now._event] && !a_cycles)
	{	printf("pan: event_trace not completed\n");
		return 0;
	}
#endif
	return 1;
}

#if !defined(SAFETY) && !defined(BFS)
/*
 * checkcycles: seed the nested (2nd) depth-first search for acceptance
 * cycles from the current state.  Sets the _a_t "2nd dfs" bits, copies
 * the seed into A_Root, records A_depth/depthfound, runs new_state()
 * (or hands the state to another core), then restores all modified
 * fields so the 1st dfs can continue unchanged.
 */
void
checkcycles(void)
{	uchar o_a_t = now._a_t;
#ifndef NOFAIR
	uchar o_cnt = now._cnt[1];
#endif
#ifdef FULLSTACK
#ifndef MA
	H_el *sv = trpt->ostate; /* save */
#else
	uchar prov = trpt->proviso; /* save */
#endif
#endif
#ifdef DEBUG
	{	int i; uchar *v = (uchar *) &now;
		printf(" set Seed state ");
#ifndef NOFAIR
		if (fairness) printf("(cnt = %d:%d, nrpr=%d) ",
			now._cnt[0], now._cnt[1], now._nr_pr);
#endif
		/* for (i = 0; i < n; i++) printf("%d,", v[i]); */
		printf("\n");
	}
	printf("%ld: cycle check starts\n", depth);
#endif
	now._a_t |= (1|16|32); /* 1 = 2nd DFS; (16|32) to improve hashing */
#ifndef NOFAIR
	now._cnt[1] = now._cnt[0];
#endif
	memcpy((char *)&A_Root, (char *)&now, vsize);
	A_depth = depthfound = depth;
#if NCORE>1
	mem_put_acc();
#else
	new_state(); /* start 2nd DFS */
#endif
	now._a_t = o_a_t;
#ifndef NOFAIR
	now._cnt[1] = o_cnt;
#endif
	A_depth = 0; depthfound = -1;
#ifdef DEBUG
	printf("%ld: cycle check returns\n", depth);
#endif
#ifdef FULLSTACK
#ifndef MA
	trpt->ostate = sv; /* restore */
#else
	trpt->proviso = prov;
#endif
#endif
}
#endif

#if defined(FULLSTACK) && defined(BITSTATE)
H_el *Free_list = (H_el *) 0;	/* recycled stack-state records, sorted by size */

/* onstack_init: allocate the separate table used to remember which
 * states are on the dfs stack during a bitstate search. */
void
onstack_init(void) /* to store stack states in a bitstate search */
{	S_Tab = (H_el **) emalloc(maxdepth*sizeof(H_el *));
}
#endif

#if !defined(BFS_PAR)
#if defined(FULLSTACK) && defined(BITSTATE)
/*
 * grab_state (bitstate + fullstack): allocate an H_el with n state
 * bytes, preferring a recycled record of exactly size n from
 * Free_list (kept sorted, largest first) before falling back to
 * emalloc.
 */
H_el *
grab_state(int n)
{	H_el *v, *last = 0;
	if (H_tab == S_Tab)
	{	for (v = Free_list; v && ((int) v->tagged >= n); v=v->nxt)
		{	if ((int) v->tagged == n)
			{	if (last)
					last->nxt = v->nxt;
				else
gotcha:					Free_list = v->nxt;
				v->tagged = 0;
				v->nxt = 0;
#ifdef COLLAPSE
				v->ln = 0;
#endif
				return v;
			}
			Fh++; last=v;
		}
		/* new: second try */
		v = Free_list;
		if (v && ((int) v->tagged >= n))
			goto gotcha;
		ngrabs++;
	}
	return (H_el *) emalloc(sizeof(H_el)+n-sizeof(unsigned));
}
#else
#if NCORE>1
/* grab_state (multi-core): allocate from the shared memory arena. */
H_el *
grab_state(int n)
{	H_el *grab_shared(int);
	return grab_shared(sizeof(H_el)+n-sizeof(unsigned));
}
#else
#ifndef AUTO_RESIZE
#define grab_state(n) (H_el *) \
	emalloc(sizeof(H_el)+n-sizeof(ulong));
#else
/*
 * grab_state (auto-resize): carve the record out of memory reclaimed
 * from the old hash table when possible (word-aligned), else emalloc.
 */
H_el *
grab_state(int n)
{	H_el *p;
	int cnt = sizeof(H_el)+n-sizeof(ulong);
#ifndef MA
	if (reclaim_size >= cnt+WS)
	{	if ((cnt & (WS-1)) != 0) /* alignment */
		{	cnt += WS - (cnt & (WS-1));
		}
		p = (H_el *) reclaim_mem;
		reclaim_mem += cnt;
		reclaim_size -= cnt;
		memset(p, 0, cnt);
	} else
#endif
	{	p = (H_el *) emalloc(cnt);
	}
	return p;
}
#endif
#endif
#endif
#else
extern volatile uchar *sh_pre_malloc(ulong);
extern volatile uchar *sh_malloc(ulong);
/*
 * grab_state (parallel bfs): allocate from shared memory (or emalloc
 * with a separate hash).  NOTE(review): "if (n == 0) m = m/n;" is a
 * deliberate divide-by-zero trap on an impossible argument, kept as
 * generated.
 */
H_el *
grab_state(int n) /* bfs_par */
{	volatile uchar *rval = NULL;
	int m = sizeof(H_el) + n - sizeof(unsigned);
	if (n == 0) m = m/n;
#ifdef BFS_SEP_HASH
	rval = emalloc((ulong) m);
#else
	rval = sh_malloc((ulong) m);
#endif
	memset((void *) rval, 0, (size_t) m);
	return (H_el *) rval;
}
#endif

#ifdef COLLAPSE
/*
 * ordinal: map a state-vector component (v, n bytes, of kind tp) to a
 * small unique integer for COLLAPSE compression.  Looks the component
 * up in a sorted hash chain (inserting when new, with per-kind counter
 * ncomps[tp] as the ordinal) under the appropriate locks for
 * multi-core / parallel-bfs builds.
 */
ulong
ordinal(char *v, long n, short tp)
{	H_el *tmp, *ntmp; long m;
	H_el *olst = (H_el *) 0;
	s_hash((uchar *)v, n);
#if defined(BFS_PAR) && !defined(BFS_SEP_HASH)
	e_critical(BFS_ID); /* bfs_par / collapse */
#endif
#if NCORE>1 && !defined(SEP_STATE)
	enter_critical(CS_ID); /* uses spinlock - 1..128 */
#endif
	tmp = H_tab[j1_spin];
	if (!tmp)
	{	tmp = grab_state(n);
		H_tab[j1_spin] = tmp;
	} else
	for ( ;; olst = tmp, tmp = tmp->nxt)
	{	if (n == tmp->ln)
		{	m = memcmp(((char *)&(tmp->state)), v, n);
			if (m == 0)
				goto done;
			if (m < 0)
			{
Insert:				ntmp = grab_state(n);
				ntmp->nxt = tmp;
				if (!olst)
					H_tab[j1_spin] = ntmp;
				else
					olst->nxt = ntmp;
				tmp = ntmp;
				break;
			} else if (!tmp->nxt)
			{
Append:				tmp->nxt = grab_state(n);
				tmp = tmp->nxt;
				break;
			}
			continue;
		}
		if (n < tmp->ln)
			goto Insert;
		else if (!tmp->nxt)
			goto Append;
	}
#if NCORE>1 && !defined(SEP_STATE)
	enter_critical(GLOBAL_LOCK);
#endif
#ifdef BFS_PAR
	e_critical(BFS_ORD); /* bfs_par */
#endif
	m = ++ncomps[tp];	/* new ordinal for this component kind */
#ifdef BFS_PAR
	x_critical(BFS_ORD);
#endif
#if NCORE>1 && !defined(SEP_STATE)
	leave_critical(GLOBAL_LOCK);
#endif
#ifdef FULLSTACK
	tmp->tagged = m;
#else
	tmp->st_id = m;
#endif
#if defined(AUTO_RESIZE) && !defined(BITSTATE)
	tmp->m_K1 = K1;
#endif
	memcpy(((char *)&(tmp->state)), v, n);
	tmp->ln = n;
done:
#if NCORE>1 && !defined(SEP_STATE)
	leave_critical(CS_ID);
#endif
#if defined(BFS_PAR) && !defined(BFS_SEP_HASH)
	x_critical(BFS_ID);
#endif
#ifdef FULLSTACK
	return tmp->tagged;
#else
	return tmp->st_id;
#endif
}

/*
 * compress (COLLAPSE mode): encode the state vector into comp_now as a
 * sequence of variable-width component ordinals (per-process, per-
 * channel, and one for the globals), each 1..4 bytes with 2-bit length
 * codes packed at the tail of the globals component.  Returns the
 * compressed length in bytes (word-padded).
 */
int
compress(char *vin, int nin) /* collapse compression */
{	char *w, *v = (char *) &comp_now;
	int i, j;
	ulong n;
	static char *x;
	static uchar nbytes[513]; /* 1 + 256 + 256 */
	static unsigned short nbytelen;
	long col_q(int, char *);
	long col_p(int, char *);
#ifndef SAFETY
	if (a_cycles)
		*v++ = now._a_t;
#ifndef NOFAIR
	if (fairness)
	for (i = 0; i < NFAIR; i++)
		*v++ = now._cnt[i];
#endif
#endif
	nbytelen = 0;
#ifndef JOINPROCS
	for (i = 0; i < (int) now._nr_pr; i++)
	{	n = col_p(i, (char *) 0);
#ifdef NOFIX
		nbytes[nbytelen] = 0;
#else
		nbytes[nbytelen] = 1;
		*v++ = ((P0 *) pptr(i))->_t;
#endif
		*v++ = n&255;
		if (n >= (1<<8))
		{	nbytes[nbytelen]++;
			*v++ = (n>>8)&255;
		}
		if (n >= (1<<16))
		{	nbytes[nbytelen]++;
			*v++ = (n>>16)&255;
		}
		if (n >= (1<<24))
		{	nbytes[nbytelen]++;
			*v++ = (n>>24)&255;
		}
		nbytelen++;
	}
#else
	x = scratch;
	for (i = 0; i < (int) now._nr_pr; i++)
		x += col_p(i, x);
	n = ordinal(scratch, x-scratch, 2); /* procs */
	*v++ = n&255;
	nbytes[nbytelen] = 0;
	if (n >= (1<<8))
	{	nbytes[nbytelen]++;
		*v++ = (n>>8)&255;
	}
	if (n >= (1<<16))
	{	nbytes[nbytelen]++;
		*v++ = (n>>16)&255;
	}
	if (n >= (1<<24))
	{	nbytes[nbytelen]++;
		*v++ = (n>>24)&255;
	}
	nbytelen++;
#endif
#ifdef SEPQS
	for (i = 0; i < (int) now._nr_qs; i++)
	{	n = col_q(i, (char *) 0);
		nbytes[nbytelen] = 0;
		*v++ = n&255;
		if (n >= (1<<8))
		{	nbytes[nbytelen]++;
			*v++ = (n>>8)&255;
		}
		if (n >= (1<<16))
		{	nbytes[nbytelen]++;
			*v++ = (n>>16)&255;
		}
		if (n >= (1<<24))
		{	nbytes[nbytelen]++;
			*v++ = (n>>24)&255;
		}
		nbytelen++;
	}
#endif
#ifdef NOVSZ
	/* 3 = _a_t, _nr_pr, _nr_qs */
	w = (char *) &now + 3 * sizeof(uchar);
#ifndef NOFAIR
	w += NFAIR;
#endif
#else
#if VECTORSZ<65536
	w = (char *) &(now._vsz) + sizeof(unsigned short);
#else
	w = (char *) &(now._vsz) + sizeof(ulong);
#endif
#endif
	x = scratch;
	*x++ = now._nr_pr;
	*x++ = now._nr_qs;
	if (now._nr_qs > 0 && qptr(0) < pptr(0))
		n = qptr(0) - (uchar *) w;
	else
		n = pptr(0) - (uchar *) w;
	j = w - (char *) &now;
#if !defined(NOCOMP) && !defined(HC)
	for (i = 0; i < (int) n; i++, w++)
		if (!Mask[j++]) *x++ = *w;
#else
	memcpy(x, w, n);
	x += n;
#endif
#ifndef SEPQS
	for (i = 0; i < (int) now._nr_qs; i++)
		x += col_q(i, x);
#endif
	x--;
	/* pack the 2-bit extra-byte counts, 4 per byte */
	for (i = 0, j = 6; i < nbytelen; i++)
	{	if (j == 6)
		{	j = 0;
			*(++x) = 0;
		} else
			j += 2;
		*x |= (nbytes[i] << j);
	}
	x++;
	for (j = 0; j < WS-1; j++)
		*x++ = 0;
	x -= j; j = 0;
	n = ordinal(scratch, x-scratch, 0); /* globals */
	*v++ = n&255;
	if (n >= (1<< 8)) { *v++ = (n>> 8)&255; j++; }
	if (n >= (1<<16)) { *v++ = (n>>16)&255; j++; }
	if (n >= (1<<24)) { *v++ = (n>>24)&255; j++; }
	*v++ = j; /* add last count as a byte */
	for (i = 0; i < WS-1; i++)
		*v++ = 0;
	v -= i;
#if 0
	printf("collapse %d -> %d\n", vsize, v - (char *)&comp_now);
#endif
	return v - (char *)&comp_now;
}
#else
#if !defined(NOCOMP)
/*
 * compress (default mode): copy vin into comp_now dropping all bytes
 * whose Mask[] bit is set (padding/don't-care bytes), zero-padding the
 * result up to a word boundary.  Under -DHC it instead stores the
 * 32/64-bit hash signature(s).  Returns the compressed length.
 */
int
compress(char *vin, int n) /* default compression */
{
#ifdef HC
	int delta = 0;
	s_hash((uchar *)vin, n); /* sets K1 and K2 */
#ifndef SAFETY
	if (S_A)
	{	delta++; /* _a_t */
#ifndef NOFAIR
		if (S_A > NFAIR)
			delta += NFAIR; /* _cnt[] */
#endif
	}
#endif
	memcpy((char *) &comp_now + delta, (char *) &K1, WS);
	delta += WS;
#if HC>0
	memcpy((char *) &comp_now + delta, (char *) &K2, HC);
	delta += HC;
#endif
	return delta;
#else
	char *vv = vin;
	char *v = (char *) &comp_now;
	int i;
#ifndef NO_FAST_C
	int r = 0, unroll = n/8;
	if (unroll > 0)
	{	i = 0;
		while (r++ < unroll)
		{	/* unroll 8 times, avoid ifs */
			/* 1 */ *v = *vv++; v += 1 - Mask[i++];
			/* 2 */ *v = *vv++; v += 1 - Mask[i++];
			/* 3 */ *v = *vv++; v += 1 - Mask[i++];
			/* 4 */ *v = *vv++; v += 1 - Mask[i++];
			/* 5 */ *v = *vv++; v += 1 - Mask[i++];
			/* 6 */ *v = *vv++; v += 1 - Mask[i++];
			/* 7 */ *v = *vv++; v += 1 - Mask[i++];
			/* 8 */ *v = *vv++; v += 1 - Mask[i++];
		}
		r = n - i; /* the rest, at most 7 */
		switch (r) {
		case 7: *v = *vv++; v += 1 - Mask[i++];
		case 6: *v = *vv++; v += 1 - Mask[i++];
		case 5: *v = *vv++; v += 1 - Mask[i++];
		case 4: *v = *vv++; v += 1 - Mask[i++];
		case 3: *v = *vv++; v += 1 - Mask[i++];
		case 2: *v = *vv++; v += 1 - Mask[i++];
		case 1: *v = *vv++; v += 1 - Mask[i++];
		case 0: break;
		}
		n = i = v - (char *)&comp_now; /* bytes written so far */
		r = (n+WS-1)/WS; /* in words, rounded up */
		r *= WS; /* total bytes to fill */
		i = r - i; /* remaining bytes */
		switch (i) {
		case 7: *v++ = 0; /* fall thru */
		case 6: *v++ = 0;
		case 5: *v++ = 0;
		case 4: *v++ = 0;
		case 3: *v++ = 0;
		case 2: *v++ = 0;
		case 1: *v++ = 0;
		case 0: break;
		default: Uerror("unexpected wordsize");
		}
		v -= i;
	} else
#endif
	{	for (i = 0; i < n; i++, vv++)
			if (!Mask[i]) *v++ = *vv;
		for (i = 0; i < WS-1; i++)
			*v++ = 0;
		v -= i;
	}
#if 0
	printf("compress %d -> %d\n", n, v - (char *)&comp_now);
#endif
	return v - (char *)&comp_now;
#endif
}
#endif
#endif

#if defined(FULLSTACK) && defined(BITSTATE)
#if defined(MA)
/* under MA the onstack_* operations are macros; provide empty stubs
 * only for any that are not already defined */
#if !defined(onstack_now)
int onstack_now(void) {}
#endif
#if !defined(onstack_put)
void onstack_put(void) {}
#endif
#if !defined(onstack_zap)
void onstack_zap(void) {}
#endif
#else
int compact_stack(char *, int);

/*
 * onstack_zap: remove the current state from the separate stack-state
 * table S_Tab (on backtrack) and move its record to the size-sorted
 * Free_list for reuse by grab_state().  A missing entry is tolerated
 * (warned once) since it has been observed in multicore runs.
 */
void
onstack_zap(void)
{	H_el *v, *w, *last = 0;
	H_el **tmp = H_tab;
	char *nv; int n, m;
	static char warned = 0;
#if defined(BCS) && defined(NO_LAST) && defined(HAS_LAST)
	uchar was_last = now._last;
	now._last = 0;
#endif
	H_tab = S_Tab;
#ifndef NOCOMP
	nv = (char *) &comp_now;
	n = compress((char *)&now, vsize);
#else
#if defined(BITSTATE) && defined(LC)
	nv = (char *) &comp_now;
	n = compact_stack((char *)&now, vsize);
#else
	nv = (char *) &now;
	n = vsize;
#endif
#endif
#if !defined(HC) && !(defined(BITSTATE) && defined(LC))
	s_hash((uchar *)nv, n);
#endif
	H_tab = tmp;
	for (v = S_Tab[j1_spin]; v; Zh++, last=v, v=v->nxt)
	{	m = memcmp(&(v->state), nv, n);
		if (m == 0)
			goto Found;
		if (m < 0)
			break;
	}
/* NotFound: */
#ifndef ZAPH
	/* seen this happen, likely harmless in multicore */
	if (warned == 0)
	{	/* Uerror("stack out of wack - zap"); */
		cpu_printf("pan: warning, stack incomplete\n");
		warned = 1;
	}
#endif
	goto done;
Found:
	ZAPS++;
	if (last)
		last->nxt = v->nxt;
	else
		S_Tab[j1_spin] = v->nxt;
	v->tagged = (unsigned) n;	/* tagged now records the size, for reuse */
#if !defined(NOREDUCE) && !defined(SAFETY)
	v->proviso = 0;
#endif
	v->nxt = last = (H_el *) 0;
	for (w = Free_list; w; Fa++, last=w, w = w->nxt)
	{	if ((int) w->tagged <= n)
		{	if (last)
			{	v->nxt = w;
				last->nxt = v;
			} else
			{	v->nxt = Free_list;
				Free_list = v;
			}
			goto done;
		}
		if (!w->nxt)
		{	w->nxt = v;
			goto done;
	}	}
	Free_list = v;
done:
#if defined(BCS) && defined(NO_LAST) && defined(HAS_LAST)
	now._last = was_last;
#endif
	return;
}

#ifndef BFS_PAR
/*
 * onstack_put: insert the current state into the stack-state table
 * S_Tab (temporarily swapped into H_tab so h_store does the work) and
 * remember the record in trpt->ostate.
 */
void
onstack_put(void)
{	H_el **tmp = H_tab;
#if defined(BCS) && defined(NO_LAST) && defined(HAS_LAST)
	uchar was_last = now._last;
	now._last = 0;
#endif
	H_tab = S_Tab;
	if (h_store((char *)&now, vsize) != 0)
#if defined(BITSTATE) && defined(LC)
		printf("pan: warning, double stack entry\n");
#else
#ifndef ZAPH
		Uerror("cannot happen - unstack_put");
#endif
#endif
	H_tab = tmp;
	trpt->ostate = Lstate;
	PUT++;
#if defined(BCS) && defined(NO_LAST) && defined(HAS_LAST)
	now._last = was_last;
#endif
}

/*
 * onstack_now: 1 iff the current state is in the stack-state table
 * S_Tab.  Also leaves Lstate pointing at the matching (or first
 * smaller) record for use by the caller.
 */
int
onstack_now(void)
{	H_el *tmp;
	H_el **tmp2 = H_tab;
	char *v; int n, m = 1;
#if defined(BCS) && defined(NO_LAST) && defined(HAS_LAST)
	uchar was_last = now._last;
	now._last = 0;
#endif
	H_tab = S_Tab;
#ifdef NOCOMP
#if defined(BITSTATE) && defined(LC)
	v = (char *) &comp_now;
	n = compact_stack((char *)&now, vsize);
#else
	v = (char *) &now;
	n = vsize;
#endif
#else
	v = (char *) &comp_now;
	n = compress((char *)&now, vsize);
#endif
#if !defined(HC) && !(defined(BITSTATE) && defined(LC))
	s_hash((uchar *)v, n);
#endif
	H_tab = tmp2;
	for (tmp = S_Tab[j1_spin]; tmp; Zn++, tmp = tmp->nxt)
	{	m = memcmp(((char *)&(tmp->state)),v,n);
		if (m <= 0)
		{	Lstate = (H_el *) tmp; /* onstack_now */
			break;
	}	}
	PROBE++;
#if defined(BCS) && defined(NO_LAST) && defined(HAS_LAST)
	now._last = was_last;
#endif
	return (m == 0);
}
#endif
#endif
#endif

#ifdef BITSTATE
void init_SS(ulong);

/*
 * sinit (bitstate): allocate the bitstate hash array SS -- either the
 * user-specified -hN megabytes (udmem) or 2^(ssize-3) bytes -- from
 * shared or private memory depending on the build.
 */
void
sinit(void)
{	if (udmem)
	{	udmem *= 1024L*1024L;
#if NCORE>1
		if (!readtrail)
		{	init_SS((ulong) udmem);
		} else
#endif
#if defined(BFS_PAR) && !defined(BFS_SEP_HASH)
		SS = (uchar *) sh_pre_malloc((ulong) udmem);
#else
		SS = (uchar *) emalloc(udmem);
#endif
		b_store = bstore_mod;
	} else
	{
#if NCORE>1
		init_SS(ONE_L<<(ssize-3));
#else
#if defined(BFS_PAR) && !defined(BFS_SEP_HASH)
		SS = (uchar *) sh_pre_malloc((ulong)(ONE_L<<(ssize-3)));
#else
		SS = (uchar *) emalloc(ONE_L<<(ssize-3));
#endif
#endif
	}
}
#else
#if !defined(MA) || defined(COLLAPSE)
/*
 * set_H_tab: allocate the main hash table H_tab.
 * GARBLED(extraction): text was lost from the following span (the
 * shift expression and several directives are fused); reproduced
 * byte-for-byte from the damaged source -- regenerate pan.c to
 * recover the original.
 */
void set_H_tab(void) { #if defined(BFS_PAR) && !defined(BFS_SEP_HASH) H_tab = (H_el **) sh_pre_malloc((ulong)((ONE_L<1 && !defined(COLLAPSE) if (!readtrail) { void init_HT(ulong); init_HT(0L); } #endif #endif #endif #if !defined(MA) || defined(COLLAPSE) #if NCORE>1 || (defined(BFS_PAR) && defined(USE_TDH) && !defined(WIN32) && !defined(WIN64)) if (!readtrail) { void init_HT(ulong);
/*
 * GARBLED(extraction): the init_HT argument below and the entire
 * header of the MA-mode g_store() function were lost; the remaining
 * text is g_store's body (store state v of n bytes in the minimized
 * dfa, returning 0=new, 1=old/off-stack, 2=on-stack, 3=stack
 * intersection).  Reproduced byte-for-byte from the damaged source.
 */
init_HT((ulong) (ONE_L<= MA)
	{	printf("pan: error, MA too small, recompile pan.c");
		printf(" with -DMA=N with N>%d\n", n);
		Uerror("aborting");
	}
	if (n > (int) maxgs)
	{	maxgs = (uint) n;
	}
	for (i = 0; i < n; i++)
	{	Info[i] = v[i];
	}
	for ( ; i < MA-1; i++)
	{	Info[i] = 0;
	}
	Info[MA-1] = pbit;
	if (a_cycles) /* place _a_t at the end */
	{	Info[MA] = Info[0];
		Info[0] = 0;
	}
#ifdef BFS_PAR
	e_critical(BFS_STATE); /* bfs_par / g_store */
#endif
#if NCORE>1 && !defined(SEP_STATE)
	enter_critical(GLOBAL_LOCK); /* crude, but necessary */
	/* to make this mode work, also replace emalloc with grab_shared inside store MA routines */
#endif
	if (!dfa_store(Info))
	{	if (pbit == 0
		&& (now._a_t&1)
		&& depth > A_depth)
		{	Info[MA] &= ~(1|16|32); /* _a_t */
			if (dfa_member(MA))
			{	Info[MA-1] = 4; /* off-stack bit */
				nShadow++;
				if (!dfa_member(MA-1))
				{	ret_val = 3;
#ifdef VERBOSE
					printf("intersected 1st dfs stack\n");
#endif
					goto done;
		}	}	}
		ret_val = 0;
#ifdef VERBOSE
		printf("new state\n");
#endif
		goto done;
	}
#ifdef FULLSTACK
	if (pbit == 0)
	{	Info[MA-1] = 1; /* proviso bit */
#ifndef BFS
		trpt->proviso = dfa_member(MA-1);
#endif
		Info[MA-1] = 4; /* off-stack bit */
		if (dfa_member(MA-1))
		{	ret_val = 1; /* off-stack */
#ifdef VERBOSE
			printf("old state\n");
#endif
		} else
		{	ret_val = 2; /* on-stack */
#ifdef VERBOSE
			printf("on-stack\n");
#endif
		}
		goto done;
	}
#endif
	ret_val = 1;
#ifdef VERBOSE
	printf("old state\n");
#endif
done:
#ifdef BFS_PAR
	x_critical(BFS_STATE);
#endif
#if NCORE>1 && !defined(SEP_STATE)
	leave_critical(GLOBAL_LOCK);
#endif
	return ret_val; /* old state */
}
#endif

#if defined(BITSTATE) && defined(LC)
/*
 * compact_stack: reduce a state vector to its two hash signatures K1
 * and K2 (plus one reserved lead byte outside SAFETY mode) for the
 * stack-state table in bitstate+LC builds.  Returns the stored length.
 */
int
compact_stack(char *vin, int n)
{	int delta = 0;
	s_hash((uchar *)vin, n); /* sets K1 and K2 */
#ifndef SAFETY
	delta++; /* room for state[0] |= 128 */
#endif
	memcpy((char *) &comp_now + delta, (char *) &K1, WS);
	delta += WS;
	memcpy((char *) &comp_now + delta, (char *) &K2, WS);
	delta += WS; /* use all available bits */
	return delta;
}
#endif

#ifdef TRIX
/*
 * sv_populate (TRIX mode): deduplicate the bodies of all processes and
 * then all channels through the hash table, storing in now._ids_[] a
 * pointer to the canonical copy of each body.  Unmodified bodies are
 * skipped (their _ids_ entry is refreshed from the cached record).
 * TRIX_RIX temporarily zeroes _pid so it does not defeat sharing.
 */
void
sv_populate(void)
{	int i, cnt = 0;
	TRIX_v6 **base = processes;
	int bound = now._nr_pr; /* MAXPROC+1; */
#ifdef V_TRIX
	printf("%4d: sv_populate\n", depth);
#endif
again:
	for (i = 0; i < bound; i++)
	{	if (base[i] != NULL)
		{	H_el *tmp; int m, n; uchar *v;
#ifndef BFS
			if (base[i]->modified == 0)
			{	cnt++;
#ifdef V_TRIX
				printf("%4d: %s %d not modified\n",
					depth, (base == processes)?"proc":"chan", i);
#endif
				continue;
			}
#ifndef V_MOD
			base[i]->modified = 0;
#endif
#endif
#ifdef TRIX_RIX
			if (base == processes)
			{	((P0 *)pptr(i))->_pid = 0;
			}
#endif
			n = base[i]->psize;
			v = base[i]->body;
			s_hash(v, n); /* sets j1_spin */
			tmp = H_tab[j1_spin];
			if (!tmp) /* new */
			{	tmp = grab_state(n);
				H_tab[j1_spin] = tmp;
				m = 1; /* non-zero */
			} else
			{	H_el *ntmp, *olst = (H_el *) 0;
				for (;; hcmp++, olst = tmp, tmp = tmp->nxt)
				{	m = memcmp(((char *)&(tmp->state)), v, n);
					if (m == 0) /* match */
					{	break;
					} else if (m < 0) /* insert */
					{	ntmp = grab_state(n);
						ntmp->nxt = tmp;
						if (!olst)
							H_tab[j1_spin] = ntmp;
						else
							olst->nxt = ntmp;
						tmp = ntmp;
						break;
					} else if (!tmp->nxt) /* append */
					{	tmp->nxt = grab_state(n);
						tmp = tmp->nxt;
						break;
			}	}	}
			if (m != 0)
			{	memcpy((char *)&(tmp->state), v, n);
#if defined(AUTO_RESIZE) && !defined(BITSTATE)
				tmp->m_K1 = K1; /* set via s_hash */
#endif
				if (verbose)
				{	if (base == processes)
					{	_p_count[i]++;
					} else
					{	_c_count[i]++;
			}	}	}
			now._ids_[cnt++] = (char *)&(tmp->state);
#ifdef TRIX_RIX
			if (base == processes)
			{	((P0 *)pptr(i))->_pid = i;
				if (BASE > 0 && i > 0)
				{	((P0 *)pptr(i))->_pid -= BASE;
			}	}
#endif
	}	}
	/* do the same for all channels */
	if (base == processes)
	{	base = channels;
		bound = now._nr_qs; /* MAXQ+1; */
		goto again;
	}
}
#endif

#if !defined(BFS_PAR) || (!defined(BITSTATE) && !defined(USE_TDH))
/*
 * h_store: store the (compressed) current state in the main hash
 * table; returns non-zero when the state was already present.
 * NOTE: this function continues beyond the end of this file chunk --
 * the text below is its visible prefix, reproduced unchanged.
 */
int
h_store(char *vin, int nin) /* hash table storage */
{	H_el *ntmp;
	H_el *tmp, *olst = (H_el *) 0;
	char *v; int n, m=0;
#ifdef HC
	uchar rem_a;
#endif
#ifdef TRIX
	sv_populate(); /* update proc and chan ids */
#endif
#ifdef NOCOMP
#if defined(BITSTATE) && defined(LC)
	if (S_Tab == H_tab)
	{	v = (char *) &comp_now;
		n = compact_stack(vin, nin);
	} else
	{	v = vin; n = nin;
	}
#else
	v = vin; n = nin;
#endif
#else
	v = (char *) &comp_now;
#ifdef HC
	rem_a = now._a_t;
	now._a_t = 0;
#endif
	n = compress(vin, nin);
#ifdef HC
	now._a_t = rem_a;
#endif
#ifndef SAFETY
	if (S_A)
	{	v[0] = 0; /* _a_t */
#ifndef NOFAIR
		if (S_A > NFAIR)
		for (m = 0; m < NFAIR; m++)
			v[m+1] = 0; /* _cnt[] */
#endif
		m = 0;
	}
#endif
#endif
#if !defined(HC) && !(defined(BITSTATE) && defined(LC))
	s_hash((uchar *)v, n);
#endif
	/* for BFS_PAR we can only get here in BITSTATE mode */
	/* and in that case we don't use locks */
#if defined(BFS_PAR) && !defined(BFS_SEP_HASH)
	e_critical(BFS_ID); /* bfs_par / h_store */
#endif
#if NCORE>1 && !defined(SEP_STATE) && !defined(BITSTATE)
	enter_critical(CS_ID);
#endif
	tmp = H_tab[j1_spin];
	if (!tmp)
	{	tmp = grab_state(n);
#if NCORE>1
		if (!tmp)
		{	/* if we get here -- we've already issued a warning */
			/* but we want to allow the normal distributed termination */
			/* to collect the stats on all cpus in the wrapup */
#if !defined(SEP_STATE) && !defined(BITSTATE)
			leave_critical(CS_ID);
#endif
			return 1; /* allow normal termination */
		}
#endif
		H_tab[j1_spin] = tmp;
	} else
	{	for (;; hcmp++, olst = tmp, tmp = tmp->nxt)
		{	/* skip the _a_t and the _cnt bytes */
#ifdef COLLAPSE
			if (tmp->ln != 0)
			{	if (!tmp->nxt) goto Append;
				continue;
			}
#endif
			m = memcmp(((char *)&(tmp->state)) + S_A, v + S_A, n - S_A);
			if (m == 0)
			{
#ifdef SAFETY
#define wasnew	0
#else
				int wasnew = 0;
#endif
#if !defined(SAFETY) && !defined(NOCOMP)
				if (S_A)
				{	if ((((char *)&(tmp->state))[0] & V_A) != V_A)
					{	wasnew = 1; nShadow++;
						((char *)&(tmp->state))[0] |= V_A;
					}
#ifndef NOFAIR
					if (S_A > NFAIR)
					{	/* 0 <= now._cnt[now._a_t&1] < MAXPROC */
						uint ci, bp; /* index, bit pos */
						ci = (now._cnt[now._a_t&1] / 8);
						bp = (now._cnt[now._a_t&1] - 8*ci);
						if (now._a_t&1) /* use tail-bits in _cnt */
						{	ci = (NFAIR - 1) - ci;
							bp = 7 - bp; /* bp = 0..7 */
						}
						ci++; /* skip over _a_t */
						bp = 1 << bp; /* the bit mask
*/ if ((((char *)&(tmp->state))[ci] & bp)==0) { if (!wasnew) { wasnew = 1; nShadow++; } ((char *)&(tmp->state))[ci] |= bp; } } /* else: wasnew == 0, i.e., old state */ #endif } #endif #if NCORE>1 Lstate = (H_el *) tmp; /* h_store */ #endif #ifdef FULLSTACK #ifndef SAFETY if (wasnew) { Lstate = (H_el *) tmp; /* h_store */ tmp->tagged |= V_A; if ((now._a_t&1) && (tmp->tagged&A_V) && depth > A_depth) { intersect: #ifdef CHECK #if NCORE>1 printf("cpu%d: ", core_id); #endif printf("1st dfs-stack intersected on state %d+\n", (int) tmp->st_id); #endif #if defined(BFS_PAR) && !defined(BFS_SEP_HASH) x_critical(BFS_ID); #endif #if NCORE>1 && !defined(SEP_STATE) && !defined(BITSTATE) leave_critical(CS_ID); #endif return 3; } #ifdef CHECK #if NCORE>1 printf("cpu%d: ", core_id); #endif printf(" New state %d+\n", (int) tmp->st_id); #endif #ifdef DEBUG dumpstate(1, (char *)&(tmp->state),n,tmp->tagged); #endif #if defined(BFS_PAR) && !defined(BFS_SEP_HASH) x_critical(BFS_ID); #endif #if NCORE>1 && !defined(SEP_STATE) && !defined(BITSTATE) leave_critical(CS_ID); #endif return 0; } else #endif if ((S_A)?(tmp->tagged&V_A):tmp->tagged) { Lstate = (H_el *) tmp; /* h_store */ #ifndef SAFETY /* already on current dfs stack */ /* but may also be on 1st dfs stack */ if ((now._a_t&1) && (tmp->tagged&A_V) && depth > A_depth #ifndef NOFAIR && (!fairness || now._cnt[1] <= 1) #endif ) goto intersect; #endif #ifdef CHECK #if NCORE>1 printf("cpu%d: ", core_id); #endif printf(" Stack state %d\n", (int) tmp->st_id); #endif #ifdef DEBUG dumpstate(0, (char *)&(tmp->state),n,tmp->tagged); #endif #if defined(BFS_PAR) && !defined(BFS_SEP_HASH) x_critical(BFS_ID); #endif #if NCORE>1 && !defined(SEP_STATE) && !defined(BITSTATE) leave_critical(CS_ID); #endif return 2; /* match on stack */ } #else if (wasnew) { #ifdef CHECK #if NCORE>1 printf("cpu%d: ", core_id); #endif printf(" New state %d+\n", (int) tmp->st_id); #endif #ifdef DEBUG dumpstate(1, (char *)&(tmp->state), n, 0); #endif #if defined(BFS_PAR) && 
!defined(BFS_SEP_HASH) x_critical(BFS_ID); #endif #if NCORE>1 && !defined(SEP_STATE) && !defined(BITSTATE) leave_critical(CS_ID); #endif return 0; } #endif #ifdef CHECK #if NCORE>1 printf("cpu%d: ", core_id); #endif printf(" Old state %d\n", (int) tmp->st_id); #endif #ifdef DEBUG dumpstate(0, (char *)&(tmp->state), n, 0); #endif #if defined(BCS) #ifdef CONSERVATIVE if (tmp->ctx_low > trpt->sched_limit) { tmp->ctx_low = trpt->sched_limit; tmp->ctx_pid[(now._last)/8] = 1 << ((now._last)%8); /* new */ #ifdef CHECK #if NCORE>1 printf("cpu%d: ", core_id); #endif printf(" Revisit with fewer context switches\n"); #endif nstates--; #if defined(BFS_PAR) && !defined(BFS_SEP_HASH) x_critical(BFS_ID); #endif #if NCORE>1 && !defined(SEP_STATE) && !defined(BITSTATE) leave_critical(CS_ID); #endif return 0; } else if ((tmp->ctx_low == trpt->sched_limit && (tmp->ctx_pid[(now._last)/8] & ( 1 << ((now._last)%8) )) == 0 )) { tmp->ctx_pid[(now._last)/8] |= 1 << ((now._last)%8); /* add */ #ifdef CHECK #if NCORE>1 printf("cpu%d: ", core_id); #endif printf(" Revisit with same nr of context switches\n"); #endif nstates--; #if defined(BFS_PAR) && !defined(BFS_SEP_HASH) x_critical(BFS_ID); #endif #if NCORE>1 && !defined(SEP_STATE) && !defined(BITSTATE) leave_critical(CS_ID); #endif return 0; } #endif #endif #ifdef REACH if (tmp->D > depth) { tmp->D = depth; #ifdef CHECK #if NCORE>1 printf("cpu%d: ", core_id); #endif printf(" ReVisiting (from smaller depth)\n"); #endif nstates--; #if defined(BFS_PAR) && !defined(BFS_SEP_HASH) x_critical(BFS_ID); #endif #if NCORE>1 && !defined(SEP_STATE) && !defined(BITSTATE) leave_critical(CS_ID); #endif return 0; } #endif #if (defined(BFS) && defined(Q_PROVISO)) || NCORE>1 Lstate = (H_el *) tmp; /* h_store */ #endif #if defined(BFS_PAR) && !defined(BFS_SEP_HASH) x_critical(BFS_ID); #endif #if NCORE>1 && !defined(SEP_STATE) && !defined(BITSTATE) leave_critical(CS_ID); #endif return 1; /* match outside stack */ } else if (m < 0) { /* insert state before tmp */ 
ntmp = grab_state(n); #if NCORE>1 if (!ntmp) { #if !defined(SEP_STATE) && !defined(BITSTATE) leave_critical(CS_ID); #endif return 1; /* allow normal termination */ } #endif ntmp->nxt = tmp; if (!olst) H_tab[j1_spin] = ntmp; else olst->nxt = ntmp; tmp = ntmp; break; } else if (!tmp->nxt) { /* append after tmp */ #ifdef COLLAPSE Append: #endif tmp->nxt = grab_state(n); #if NCORE>1 if (!tmp->nxt) { #if !defined(SEP_STATE) && !defined(BITSTATE) leave_critical(CS_ID); #endif return 1; /* allow normal termination */ } #endif tmp = tmp->nxt; break; } } } #ifdef CHECK tmp->st_id = (unsigned) nstates; #if NCORE>1 printf("cpu%d: ", core_id); #endif #ifdef BITSTATE printf(" Push state %d\n", ((int) nstates) - 1); #else printf(" New state %d\n", (int) nstates); #endif #endif #if defined(BCS) tmp->ctx_low = trpt->sched_limit; #ifdef CONSERVATIVE tmp->ctx_pid[(now._last)/8] = 1 << ((now._last)%8); /* new limit */ #endif #endif #if !defined(SAFETY) || defined(REACH) tmp->D = depth; #endif #if !defined(SAFETY) && !defined(NOCOMP) if (S_A) { v[0] = V_A; #ifndef NOFAIR if (S_A > NFAIR) { uint ci, bp; /* as above */ ci = (now._cnt[now._a_t&1] / 8); bp = (now._cnt[now._a_t&1] - 8*ci); if (now._a_t&1) { ci = (NFAIR - 1) - ci; bp = 7 - bp; /* bp = 0..7 */ } v[1+ci] = 1 << bp; } #endif } #endif #if defined(AUTO_RESIZE) && !defined(BITSTATE) tmp->m_K1 = K1; #endif memcpy(((char *)&(tmp->state)), v, n); #ifdef FULLSTACK tmp->tagged = (S_A)?V_A:(depth+1); #ifdef DEBUG dumpstate(-1, v, n, tmp->tagged); #endif Lstate = (H_el *) tmp; /* end of h_store */ #else #ifdef DEBUG dumpstate(-1, v, n, 0); #endif #if NCORE>1 Lstate = (H_el *) tmp; /* end of h_store */ #endif #endif #if defined(BFS_PAR) && !defined(BFS_SEP_HASH) x_critical(BFS_ID); #endif #if NCORE>1 #ifdef V_PROVISO tmp->cpu_id = core_id; #endif #if !defined(SEP_STATE) && !defined(BITSTATE) leave_critical(CS_ID); #endif #endif return 0; } #endif void o_hash32(uchar *s, int len, int h) /* 32-bit, like d_sfh, but with seed */ { uint32_t 
tmp; int rem; rem = len & 3; len >>= 2; for ( ; len > 0; len--) { h += get16bits(s); tmp = (get16bits(s+2) << 11) ^ h; h = (h << 16) ^ tmp; s += 2*sizeof(uint16_t); h += h >> 11; } switch (rem) { case 3: h += get16bits(s); h ^= h << 16; h ^= s[sizeof(uint16_t)] << 18; h += h >> 11; break; case 2: h += get16bits(s); h ^= h << 11; h += h >> 17; break; case 1: h += *s; h ^= h << 10; h += h >> 1; break; } h ^= h << 3; h += h >> 5; h ^= h << 4; h += h >> 17; h ^= h << 25; h += h >> 6; K1 = h; } void o_hash64(uchar *kb, int nbytes, int seed) { uint8_t *bp; uint64_t a, b, c, n; const uint64_t *k = (uint64_t *) kb; n = nbytes/WS; /* nr of 8-byte chunks */ /* extend to multiple of words, if needed */ a = WS - (nbytes % WS); if (a > 0 && a < WS) { n++; bp = kb + nbytes; switch (a) { case 7: *bp++ = 0; /* fall thru */ case 6: *bp++ = 0; /* fall thru */ case 5: *bp++ = 0; /* fall thru */ case 4: *bp++ = 0; /* fall thru */ case 3: *bp++ = 0; /* fall thru */ case 2: *bp++ = 0; /* fall thru */ case 1: *bp = 0; case 0: break; } } a = (uint64_t) seed; b = HASH_CONST[HASH_NR]; c = 0x9e3779b97f4a7c13LL; /* arbitrary */ while (n >= 3) { a += k[0]; b += k[1]; c += k[2]; mix(a,b,c); n -= 3; k += 3; } c += (((uint64_t) nbytes)<<3); switch (n) { case 2: b += k[1]; case 1: a += k[0]; case 0: break; } mix(a,b,c); K1 = a; } #if defined(USE_TDH) && !defined(WIN32) && !defined(WIN64) #ifdef HC #ifndef T_HC #ifdef BFS_HC #define T_HC BFS_HC #else #define T_HC 2 #endif #endif #if T_HC<1 || T_HC>4 #error "BFS_HC must be 1, 2, 3, or 4 (default is 2)" #endif #endif #define T_ROW 6 #define T_ROW_SIZE (1< x)) { Uerror("assertion x * (ulong) T_VSZ > x fails"); } #ifdef BFS_SEP_HASH ohash_sd = (char *) emalloc(x * (ulong) T_VSZ); #else ohash_sd = (volatile char *) sh_pre_malloc(x * (ulong) T_VSZ); #endif #else ohash_hc_sz = (ulong) (T_HC * (ulong) sizeof(uint32_t)); if (!(x * ohash_hc_sz > x)) { Uerror("assertion x * ohash_hc_sz > x fails"); } #ifdef BFS_SEP_HASH ohash_sd = (char *) emalloc(x * 
ohash_hc_sz); #else ohash_sd = (volatile char *) sh_pre_malloc(x * ohash_hc_sz); #endif #endif #ifdef BFS_SEP_HASH ohash_hv = (uint32_t *) emalloc(x * (ulong) sizeof(uint32_t)); #else ohash_hv = (volatile uint32_t *) sh_pre_malloc(x * (ulong) sizeof(uint32_t)); #endif ohash_mask = (((ulong)1)<o_pm &= ~2; #ifdef VERBOSE bfs_printf("check to mark\n"); #endif for (i = 0; i < (int) now._nr_pr; i++) { P0 *ptr = (P0 *) pptr(i); if (accpstate[ptr->_t][ptr->_p]) { trpt->o_pm |= 2; now._l_bnd = L_bound; now._l_sds = (uchar *) 0; #ifdef VERBOSE bfs_printf("mark state live\n"); #endif break; } } } void bfs_check_live(uchar b, uchar *s) { /* assert(b>0); */ now._l_bnd = b-1; /* decrease bound */ #ifdef VERBOSE bfs_printf("check live %d\n", b); #endif if (b == L_bound && boq == -1) { now._l_sds = (uchar *) Lstate; /* new target */ } else { now._l_sds = s; /* restore target */ if (s == (uchar *) Lstate) { depthfound = depth - (BASE+1)*(L_bound - now._l_bnd - 1); uerror("accept cycle found"); depthfound = -1; now._l_bnd = 0; now._l_sds = (uchar *) 0; } } #ifdef VERBOSE bfs_printf("set l_bound to %d -- sds %p\n", b-1, (void *) now._l_sds); #endif } #endif /* closed hashing with locality - similar to ltsmin */ int o_store(const char *vin, int nin) { int i, seed = 0; ulong hash_v, ix, ex; uint32_t T_BUSY, T_DONE; volatile uint32_t *t_entry; #ifdef HC ulong vs = ohash_hc_sz; #else ulong vs = (ulong) T_VSZ; #endif #ifdef L_BOUND uchar o_bnd, *o_sds; #endif #ifndef STOP_ON_FULL if (h_table_full) { goto done; } #endif #ifdef L_BOUND if (now._l_bnd == 0) { bfs_mark_live(); } #ifdef VERBOSE else { bfs_printf("non-markable state %d\n", now._l_bnd); } #endif o_bnd = now._l_bnd; o_sds = now._l_sds; now._l_bnd = (o_bnd)?1:0; /* mark nested phase of bounded search */ now._l_sds = (uchar *) 0; #endif #if !defined(HC) && !defined(T_NOCOMP) nin = compress((char *)vin, nin); vin = (char *) &comp_now; #endif do { o_hash((uchar *)vin, nin, seed++); hash_v = K1; } while (hash_v == T_FREE || hash_v == 
T_STAT); /* unlikely, hash_v 0 or 1 */ T_BUSY = ((uint32_t) hash_v & ~((uint32_t) T_STAT)); /* hash with status bit 0 */ T_DONE = ((uint32_t) hash_v | ((uint32_t) T_STAT)); /* hash with status bit 1 */ #ifdef HC d_hash((uchar *)vin, nin); ohash_hc[0] = (uint32_t) K1; #if T_HC>1 ohash_hc[1] = (uint32_t) (K1>>32); #endif #if T_HC>2 ohash_hc[2] = (uint32_t) K2; #endif #if T_HC>3 ohash_hc[3] = (uint32_t) (K2>>32); #endif #endif while (seed < ohash_max) { ix = hash_v & ohash_mask; ex = (ix & T_ROW_MASK) + T_ROW_SIZE; for (i = 0; i < T_ROW_SIZE; i++) { t_entry = (uint32_t *) &ohash_hv[ix]; if (*t_entry == T_FREE && cas(t_entry, T_FREE, T_BUSY)) { #ifndef HC memcpy((char *) &ohash_sd[ix * vs], vin, nin); #else memcpy((char *) &ohash_sd[ix * vs], (char *) ohash_hc, vs); #endif #if defined(USE_TDH) && defined(Q_PROVISO) ohash_inq[ix] = (uchar) BFS_INQ; Lstate = (H_el *) &ohash_inq[ix]; #endif *t_entry = T_DONE; #ifdef VERBOSE #ifdef L_BOUND bfs_printf("New state %p [%p]\n", (void *) Lstate, (void *) o_sds); #else bfs_printf("New state %p\n", (void *) Lstate); #endif #endif #ifdef L_BOUND if (o_bnd) { bfs_check_live(o_bnd, o_sds); } #endif return 0; /* New State */ } while (*t_entry == T_BUSY) { usleep(2); /* wait */ } if (*t_entry == T_DONE /* (first) hash matches, check data */ #ifndef HC && memcmp((char *) &ohash_sd[ix * vs], vin, nin) == 0) #else && memcmp((char *) &ohash_sd[ix * vs], (char *) ohash_hc, vs) == 0) #endif { #if defined(USE_TDH) && defined(Q_PROVISO) Lstate = (H_el *) &ohash_inq[ix]; #endif #ifdef VERBOSE #ifdef L_BOUND bfs_printf("Old state %p [%p]\n", (void *) Lstate, (void *) o_sds); #else bfs_printf("Old state %p\n", (void *) Lstate); #endif #endif #ifdef L_BOUND if (o_bnd) { bfs_check_live(o_bnd, o_sds); } #endif return 1; /* Old State */ } hcmp++; ix++; ix = (ix==ex) ? 
ex - T_ROW_SIZE : ix; } /* find a new slot: */ do { o_hash((uchar *)vin, nin, (int) (hash_v + seed++)); hash_v = K1; } while (hash_v == T_FREE || hash_v == T_STAT); T_BUSY = ((uint32_t) hash_v & ~((uint32_t) T_STAT)); T_DONE = ((uint32_t) hash_v | ((uint32_t) T_STAT)); } #ifdef STOP_ON_FULL Uerror("hash table full"); /* no return from Uerror */ #else if (!h_table_full) { h_table_full++; if (who_am_i == 0) { bfs_printf("hash table is full\n"); } } done: bfs_punt++; /* counts this as a lost state */ #endif #ifdef L_BOUND now._l_bnd = 0; /* no more checking */ now._l_sds = (uchar *) 0; #endif return 1; /* technically should be 0, but we want to throttle down */ } #endif #endif #include TRANSITIONS void do_reach(void) { r_ck(reached0, _nstates0, 0, src_ln0, src_file0); r_ck(reached1, _nstates1, 1, src_ln1, src_file1); } void iniglobals(int calling_pid) { { int l_in; for (l_in = 0; l_in < 4; l_in++) { eating[l_in] = 0; } } { int l_in; for (l_in = 0; l_in < 4; l_in++) { now.fork[l_in] = 0; } } #ifdef VAR_RANGES { int l_in; for (l_in = 0; l_in < 4; l_in++) { logval("fork[l_in]", now.fork[l_in]); } } #endif } int addqueue(int calling_pid, int n, int is_rv) { int j=0, i = now._nr_qs; #if !defined(NOCOMP) && !defined(TRIX) int k; #endif if (i >= MAXQ) Uerror("too many queues"); #ifdef V_TRIX printf("%4d: add queue %d\n", depth, i); #endif switch (n) { default: Uerror("bad queue - addqueue"); } #ifdef BFS_PAR bfs_prepmask(2); /* addqueue */ #endif #ifdef TRIX vsize += sizeof(H_el *); #else if (vsize%WS) q_skip[i] = WS-(vsize%WS); else q_skip[i] = 0; #if !defined(NOCOMP) && !defined(HC) k = vsize; #ifndef BFS if (is_rv) k += j; #endif for (k += (int) q_skip[i]; k > vsize; k--) Mask[k-1] = 1; #endif vsize += (int) q_skip[i]; q_offset[i] = vsize; vsize += j; #ifdef BFS_PAR bfs_fixmask(2); /* addqueue */ #endif #endif now._nr_qs += 1; #ifndef NOVSZ now._vsz = vsize; #endif hmax = max(hmax, vsize); #ifdef TRIX #ifndef BFS if (freebodies) { channels[i] = freebodies; freebodies = 
freebodies->nxt; } else { channels[i] = (TRIX_v6 *) emalloc(sizeof(TRIX_v6)); channels[i]->body = (uchar *) emalloc(Maxbody * sizeof(char)); } channels[i]->modified = 1; /* addq */ #endif channels[i]->psize = j; channels[i]->parent_pid = calling_pid; channels[i]->nxt = (TRIX_v6 *) 0; #else if (vsize >= VECTORSZ) Uerror("VECTORSZ is too small, edit pan.h"); #endif if (j > 0) { memset((char *)qptr(i), 0, j); } ((Q0 *)qptr(i))->_t = n; return i+1; } #ifdef TRIX int what_p_size(int t) { int j; switch (t) { case 0: j = sizeof(P0); break; case 1: j = sizeof(P1); break; case 2: j = sizeof(P2); break; default: Uerror("bad proctype"); } return j; } int what_q_size(int t) { int j; switch (t) { case 0: j = sizeof(Q0); break; default: Uerror("bad qtype"); } return j; } #endif #if NQS>0 void qsend(int into, int sorted, int args_given) { int j; uchar *z; #ifdef HAS_SORTED int k; #endif if (!into--) uerror("ref to uninitialized chan name (sending)"); if (into >= (int) now._nr_qs || into < 0) Uerror("qsend bad queue#"); #if defined(TRIX) && !defined(BFS) #ifndef TRIX_ORIG (trpt+1)->q_bup = now._ids_[now._nr_pr+into]; #ifdef V_TRIX printf("%4d: channel %d s save %p from %d\n", depth, into, (trpt+1)->q_bup, now._nr_pr+into); #endif #endif channels[into]->modified = 1; /* qsend */ #ifdef V_TRIX printf("%4d: channel %d modified\n", depth, into); #endif #endif z = qptr(into); j = ((Q0 *)qptr(into))->Qlen; switch (((Q0 *)qptr(into))->_t) { case 0: printf("queue %d was deleted\n", into+1); default: Uerror("bad queue - qsend"); } #ifdef EVENT_TRACE if (in_s_scope(into+1)) require('s', into); #endif } #endif #if SYNC int q_zero(int from) { if (!from--) { uerror("ref to uninitialized chan name (q_zero)"); return 0; } switch(((Q0 *)qptr(from))->_t) { case 0: printf("queue %d was deleted\n", from+1); } Uerror("bad queue q-zero"); return -1; } int not_RV(int from) { if (q_zero(from)) { printf("==>> a test of the contents of a rv "); printf("channel always returns FALSE\n"); uerror("error to 
poll rendezvous channel"); } return 1; } #endif #ifndef XUSAFE void setq_claim(int x, int m, char *s, int y, char *p) { if (x == 0) uerror("x[rs] claim on uninitialized channel"); if (x < 0 || x > MAXQ) Uerror("cannot happen setq_claim"); q_claim[x] |= m; p_name[y] = p; q_name[x] = s; if (m&2) q_S_check(x, y); if (m&1) q_R_check(x, y); } short q_sender[MAXQ+1]; int q_S_check(int x, int who) { if (!q_sender[x]) { q_sender[x] = who+1; #if SYNC if (q_zero(x)) { printf("chan %s (%d), ", q_name[x], x-1); printf("sndr proc %s (%d)\n", p_name[who], who); uerror("xs chans cannot be used for rv"); } #endif } else if (q_sender[x] != who+1) { printf("pan: xs assertion violated: "); printf("access to chan <%s> (%d)\npan: by ", q_name[x], x-1); if (q_sender[x] > 0 && p_name[q_sender[x]-1]) printf("%s (proc %d) and by ", p_name[q_sender[x]-1], q_sender[x]-1); printf("%s (proc %d)\n", p_name[who], who); uerror("error, partial order reduction invalid"); } return 1; } short q_recver[MAXQ+1]; int q_R_check(int x, int who) { #ifdef VERBOSE printf("q_R_check x=%d who=%d\n", x, who); #endif if (!q_recver[x]) { q_recver[x] = who+1; #if SYNC if (q_zero(x)) { printf("chan %s (%d), ", q_name[x], x-1); printf("recv proc %s (%d)\n", p_name[who], who); uerror("xr chans cannot be used for rv"); } #endif } else if (q_recver[x] != who+1) { printf("pan: xr assertion violated: "); printf("access to chan %s (%d)\npan: ", q_name[x], x-1); if (q_recver[x] > 0 && p_name[q_recver[x]-1]) printf("by %s (proc %d) and ", p_name[q_recver[x]-1], q_recver[x]-1); printf("by %s (proc %d)\n", p_name[who], who); uerror("error, partial order reduction invalid"); } return 1; } #endif int q_len(int x) { if (!x--) uerror("ref to uninitialized chan name (len)"); return ((Q0 *)qptr(x))->Qlen; } int q_full(int from) { if (!from--) uerror("ref to uninitialized chan name (qfull)"); switch(((Q0 *)qptr(from))->_t) { case 0: printf("queue %d was deleted\n", from+1); } Uerror("bad queue - q_full"); return 0; } #ifdef 
HAS_UNLESS int q_e_f(int from) { /* empty or full */ return !q_len(from) || q_full(from); } #endif #if NQS>0 int qrecv(int from, int slot, int fld, int done) { uchar *z; int j, k, r=0; if (!from--) uerror("ref to uninitialized chan name (receiving)"); #if defined(TRIX) && !defined(BFS) #ifndef TRIX_ORIG (trpt+1)->q_bup = now._ids_[now._nr_pr+from]; #ifdef V_TRIX printf("%4d: channel %d r save %p from %d\n", depth, from, (trpt+1)->q_bup, now._nr_pr+from); #endif #endif channels[from]->modified = 1; /* qrecv */ #ifdef V_TRIX printf("%4d: channel %d modified\n", depth, from); #endif #endif if (from >= (int) now._nr_qs || from < 0) Uerror("qrecv bad queue#"); z = qptr(from); #ifdef EVENT_TRACE if (done && (in_r_scope(from+1))) require('r', from); #endif switch (((Q0 *)qptr(from))->_t) { case 0: printf("queue %d was deleted\n", from+1); default: Uerror("bad queue - qrecv"); } return r; } #endif #ifndef BITSTATE #ifdef COLLAPSE long col_q(int i, char *z) { int j=0, k; char *x, *y; Q0 *ptr = (Q0 *) qptr(i); switch (ptr->_t) { default: Uerror("bad qtype - collapse"); } if (z) x = z; else x = scratch; y = (char *) ptr; k = q_offset[i]; #if NQS > 0 /* no need to store the empty slots at the end */ j -= (q_max[ptr->_t] - ptr->Qlen) * ((j - 2)/q_max[ptr->_t]); #endif #if !defined(NOCOMP) && !defined(HC) for ( ; j > 0; j--, y++) if (!Mask[k++]) *x++ = *y; #else memcpy(x, y, j); x += j; #endif for (j = 0; j < WS-1; j++) *x++ = 0; x -= j; if (z) return (long) (x - z); return ordinal(scratch, x-scratch, 1); /* chan */ } #endif #endif int unsend(int into) { int _m=0, j; uchar *z; #ifdef HAS_SORTED int k; #endif if (!into--) uerror("ref to uninitialized chan (unsend)"); #if defined(TRIX) && !defined(BFS) #ifndef TRIX_ORIG now._ids_[now._nr_pr+into] = trpt->q_bup; #ifdef V_TRIX printf("%4d: channel %d s restore %p into %d\n", depth, into, trpt->q_bup, now._nr_pr+into); #endif #else channels[into]->modified = 1; /* unsend */ #ifdef V_TRIX printf("%4d: channel %d unmodify\n", depth, 
into); #endif #endif #endif z = qptr(into); j = ((Q0 *)z)->Qlen; ((Q0 *)z)->Qlen = --j; switch (((Q0 *)qptr(into))->_t) { default: Uerror("bad queue - unsend"); } return _m; } void unrecv(int from, int slot, int fld, int fldvar, int strt) { int j; uchar *z; if (!from--) uerror("ref to uninitialized chan (unrecv)"); #if defined(TRIX) && !defined(BFS) #ifndef TRIX_ORIG now._ids_[now._nr_pr+from] = trpt->q_bup; #ifdef V_TRIX printf("%4d: channel %d r restore %p into %d\n", depth, from, trpt->q_bup, now._nr_pr+from); #endif #else channels[from]->modified = 1; /* unrecv */ #ifdef V_TRIX printf("%4d: channel %d unmodify\n", depth, from); #endif #endif #endif z = qptr(from); j = ((Q0 *)z)->Qlen; if (strt) ((Q0 *)z)->Qlen = j+1; switch (((Q0 *)qptr(from))->_t) { default: Uerror("bad queue - qrecv"); } } int q_cond(short II, Trans *t) { int i = 0; for (i = 0; i < 6; i++) { if (t->ty[i] == TIMEOUT_F) return 1; if (t->ty[i] == ALPHA_F) #ifdef GLOB_ALPHA return 0; #else return (II+1 == (short) now._nr_pr && II+1 < MAXPROC); #endif switch (t->qu[i]) { case 0: break; default: Uerror("unknown qid - q_cond"); return 0; } } return 1; } void to_compile(void) { char ctd[2048], carg[128]; #ifdef BITSTATE strcpy(ctd, "-DBITSTATE "); #else strcpy(ctd, ""); #endif #ifdef BFS_PAR strcat(ctd, "-DBFS_PAR "); #endif #ifdef NOVSZ strcat(ctd, "-DNOVSZ "); #endif #ifdef RHASH strcat(ctd, "-DRHASH "); #else #ifdef PERMUTED strcat(ctd, "-DPERMUTED "); #endif #endif #ifdef P_REVERSE strcat(ctd, "-DP_REVERSE "); #endif #ifdef T_REVERSE strcat(ctd, "-DT_REVERSE "); #endif #ifdef T_RAND #if T_RAND>0 sprintf(carg, "-DT_RAND=%d ", T_RAND); strcat(ctd, carg); #else strcat(ctd, "-DT_RAND "); #endif #endif #ifdef P_RAND #if P_RAND>0 sprintf(carg, "-DP_RAND=%d ", P_RAND); strcat(ctd, carg); #else strcat(ctd, "-DP_RAND "); #endif #endif #ifdef BCS sprintf(carg, "-DBCS=%d ", BCS); strcat(ctd, carg); #endif #ifdef BFS strcat(ctd, "-DBFS "); #endif #ifdef MEMLIM sprintf(carg, "-DMEMLIM=%d ", MEMLIM); 
strcat(ctd, carg); #else #ifdef MEMCNT sprintf(carg, "-DMEMCNT=%d ", MEMCNT); strcat(ctd, carg); #endif #endif #ifdef NOCLAIM strcat(ctd, "-DNOCLAIM "); #endif #ifdef SAFETY strcat(ctd, "-DSAFETY "); #else #ifdef NOFAIR strcat(ctd, "-DNOFAIR "); #else #ifdef NFAIR if (NFAIR != 2) { sprintf(carg, "-DNFAIR=%d ", NFAIR); strcat(ctd, carg); } #endif #endif #endif #ifdef NOREDUCE strcat(ctd, "-DNOREDUCE "); #else #ifdef XUSAFE strcat(ctd, "-DXUSAFE "); #endif #endif #ifdef NP strcat(ctd, "-DNP "); #endif #ifdef PEG strcat(ctd, "-DPEG "); #endif #ifdef VAR_RANGES strcat(ctd, "-DVAR_RANGES "); #endif #ifdef HC strcat(ctd, "-DHC "); #endif #ifdef CHECK strcat(ctd, "-DCHECK "); #endif #ifdef CTL strcat(ctd, "-DCTL "); #endif #ifdef TRIX strcat(ctd, "-DTRIX "); #endif #ifdef NIBIS strcat(ctd, "-DNIBIS "); #endif #ifdef NOBOUNDCHECK strcat(ctd, "-DNOBOUNDCHECK "); #endif #ifdef NOSTUTTER strcat(ctd, "-DNOSTUTTER "); #endif #ifdef REACH strcat(ctd, "-DREACH "); #endif #ifdef PRINTF strcat(ctd, "-DPRINTF "); #endif #ifdef COLLAPSE strcat(ctd, "-DCOLLAPSE "); #endif #ifdef MA sprintf(carg, "-DMA=%d ", MA); strcat(ctd, carg); #endif #ifdef SVDUMP strcat(ctd, "-DSVDUMP "); #endif #if defined(VECTORSZ) && !defined(TRIX) if (VECTORSZ != 1024) { sprintf(carg, "-DVECTORSZ=%d ", VECTORSZ); strcat(ctd, carg); } #endif #ifdef VERBOSE strcat(ctd, "-DVERBOSE "); #endif #ifdef CHECK strcat(ctd, "-DCHECK "); #endif #ifdef SDUMP strcat(ctd, "-DSDUMP "); #endif #if NCORE>1 sprintf(carg, "-DNCORE=%d ", NCORE); strcat(ctd, carg); #endif #ifdef VMAX if (VMAX != 256) { sprintf(carg, "-DVMAX=%d ", VMAX); strcat(ctd, carg); } #endif #ifdef PMAX if (PMAX != 16) { sprintf(carg, "-DPMAX=%d ", PMAX); strcat(ctd, carg); } #endif #ifdef QMAX if (QMAX != 16) { sprintf(carg, "-DQMAX=%d ", QMAX); strcat(ctd, carg); } #endif #ifdef SET_WQ_SIZE sprintf(carg, "-DSET_WQ_SIZE=%d ", SET_WQ_SIZE); strcat(ctd, carg); #endif printf("Compiled as: cc -o pan %span.c\n", ctd); } void active_procs(void) { if (reversing == 
0) { Addproc(1, 1); } else { Addproc(1, 1); } } #ifdef MA #define TWIDTH 256 #define HASH(y,n) (n)*(((long)y)) #define INRANGE(e,h) ((h>=e->From && h<=e->To)||(e->s==1 && e->S==h)) extern char *emalloc(unsigned long); /* imported routine */ extern void dfa_init(ushort); /* 4 exported routines */ extern int dfa_member(ulong); extern int dfa_store(uchar *); extern void dfa_stats(void); typedef struct Edge { uchar From, To; /* max range 0..255 */ uchar s, S; /* if s=1, S is singleton */ struct Vertex *Dst; struct Edge *Nxt; } Edge; typedef struct Vertex { ulong key, num; /* key for splay tree, nr incoming edges */ uchar from[2], to[2]; /* in-node predefined edge info */ struct Vertex *dst[2];/* most nodes have 2 or more edges */ struct Edge *Succ; /* in case there are more edges */ struct Vertex *lnk, *left, *right; /* splay tree plumbing */ } Vertex; static Edge *free_edges; static Vertex *free_vertices; static Vertex **layers; /* one splay tree of nodes per layer */ static Vertex **path; /* run of word in the DFA */ static Vertex *R, *F, *NF; /* Root, Final, Not-Final */ static uchar *word, *lastword;/* string, and last string inserted */ static int dfa_depth, iv=0, nv=0, pfrst=0, Tally; static void insert_it(Vertex *, int); /* splay-tree code */ static void delete_it(Vertex *, int); static Vertex *find_it(Vertex *, Vertex *, uchar, int); static void recyc_edges(Edge *e) { if (!e) return; recyc_edges(e->Nxt); e->Nxt = free_edges; free_edges = e; } static Edge * new_edge(Vertex *dst) { Edge *e; if (free_edges) { e = free_edges; free_edges = e->Nxt; e->From = e->To = e->s = e->S = 0; e->Nxt = (Edge *) 0; } else e = (Edge *) emalloc(sizeof(Edge)); e->Dst = dst; return e; } static void recyc_vertex(Vertex *v) { recyc_edges(v->Succ); v->Succ = (Edge *) free_vertices; free_vertices = v; nr_states--; } static Vertex * new_vertex(void) { Vertex *v; if (free_vertices) { v = free_vertices; free_vertices = (Vertex *) v->Succ; v->Succ = (Edge *) 0; v->num = 0; } else v = 
(Vertex *) emalloc(sizeof(Vertex)); nr_states++; return v; }	/* tail of new_vertex() -- its head precedes this chunk */

/* Build one layer of the initial automaton: give v a single range-edge
 * [0..255] to a fresh vertex, register v in layer n, return the successor.
 */
static Vertex *
allDelta(Vertex *v, int n)
{
	Vertex *dst = new_vertex();

	v->from[0] = 0;
	v->to[0] = 255;
	v->dst[0] = dst;
	dst->num = 256;		/* one incoming transition per byte value */
	insert_it(v, n);
	return dst;
}

/* Attach edge e to vertex v.  The two in-vertex slots dst[0]/dst[1]
 * cache the most recently added edges; older edges overflow into the
 * Succ list.  The Edge struct is recycled when it fits in a slot.
 */
static void
insert_edge(Vertex *v, Edge *e)
{	/* put new edge first */
	if (!v->dst[0])
	{	v->dst[0] = e->Dst;
		v->from[0] = e->From;
		v->to[0] = e->To;
		recyc_edges(e);
		return;
	}
	if (!v->dst[1])
	{	v->from[1] = v->from[0]; v->from[0] = e->From;
		v->to[1] = v->to[0]; v->to[0] = e->To;
		v->dst[1] = v->dst[0]; v->dst[0] = e->Dst;
		recyc_edges(e);
		return;
	} /* shift */
	{	int f = v->from[1];
		int t = v->to[1];
		Vertex *d = v->dst[1];
		v->from[1] = v->from[0]; v->from[0] = e->From;
		v->to[1] = v->to[0]; v->to[0] = e->To;
		v->dst[1] = v->dst[0]; v->dst[0] = e->Dst;
		e->From = f;	/* e now carries the evicted slot-1 edge */
		e->To = t;
		e->Dst = d;
	}
	e->Nxt = v->Succ;
	v->Succ = e;
}

/* Append a copy of edge list e (in original order, via recursion)
 * to the front of v's Succ list.
 */
static void
copyRecursive(Vertex *v, Edge *e)
{	Edge *f;
	if (e->Nxt) copyRecursive(v, e->Nxt);
	f = new_edge(e->Dst);
	f->From = e->From;
	f->To = e->To;
	f->s = e->s;
	f->S = e->S;
	f->Nxt = v->Succ;
	v->Succ = f;
}

/* Copy the outgoing transition structure of 'from' into 'to'
 * (slot edges by assignment, Succ list by deep copy).
 */
static void
copyEdges(Vertex *to, Vertex *from)
{	int i;
	for (i = 0; i < 2; i++)
	{	to->from[i] = from->from[i];
		to->to[i] = from->to[i];
		to->dst[i] = from->dst[i];
	}
	if (from->Succ) copyRecursive(to, from->Succ);
}

/* Like Delta() but returns the Edge that matches symbol h, with a
 * one-entry cache (ov).  Slot hits are materialized into the static
 * tmp Edge so callers always get an Edge pointer.
 */
static Edge *
cacheDelta(Vertex *v, int h, int first)
{	static Edge *ov, tmp;
	int i;

	if (!first && INRANGE(ov,h))
		return ov; /* intercepts about 10% */
	for (i = 0; i < 2; i++)
		if (v->dst[i] && h >= v->from[i] && h <= v->to[i])
		{	tmp.From = v->from[i];
			tmp.To = v->to[i];
			tmp.Dst = v->dst[i];
			tmp.s = tmp.S = 0;
			ov = &tmp;
			return ov;
		}
	for (ov = v->Succ; ov; ov = ov->Nxt)
		if (INRANGE(ov,h)) return ov;

	Uerror("cannot get here, cacheDelta");
	return (Edge *) 0;
}

/* Transition function: successor of v under symbol h. */
static Vertex *
Delta(Vertex *v, int h) /* v->delta[h] */
{	Edge *e;

	if (v->dst[0] && h >= v->from[0] && h <= v->to[0])
		return v->dst[0]; /* oldest edge */
	if (v->dst[1] && h >= v->from[1] && h <= v->to[1])
		return v->dst[1];
	for (e = v->Succ; e; e = e->Nxt)
		if (INRANGE(e,h)) return e->Dst;
	Uerror("cannot happen Delta");
	return (Vertex *) 0;
}

/* Adjust the incoming-edge counts of all successors of v by d
 * (+1 or -1) per covered symbol; overflow-checked when adding.
 */
static void
numDelta(Vertex *v, int d)
{	Edge *e;
	ulong cnt;
	int i;
	for (i = 0; i < 2; i++)
	if (v->dst[i])
	{	cnt = v->dst[i]->num + d*(1 + v->to[i] - v->from[i]);
		if (d == 1 && cnt < v->dst[i]->num)
			goto bad;	/* ulong wrapped */
		v->dst[i]->num = cnt;
	}
	for (e = v->Succ; e; e = e->Nxt)
	{	cnt = e->Dst->num + d*(1 + e->To - e->From + e->s);
		if (d == 1 && cnt < e->Dst->num)
bad:			Uerror("too many incoming edges");
		e->Dst->num = cnt;
	}
}

/* Redirect v's transition on symbol h to newdst, first removing the
 * old entry (splitting a range if h falls inside one), then merging
 * h into an existing edge to newdst where possible.
 */
static void
setDelta(Vertex *v, int h, Vertex *newdst) /* v->delta[h] = newdst; */
{	Edge *e, *f = (Edge *) 0, *g;
	int i;

	/* remove the old entry, if there */
	for (i = 0; i < 2; i++)
		if (v->dst[i] && h >= v->from[i] && h <= v->to[i])
		{	if (h == v->from[i])
			{	if (h == v->to[i])
				{	v->dst[i] = (Vertex *) 0;
					v->from[i] = v->to[i] = 0;
				} else
					v->from[i]++;
			} else if (h == v->to[i])
			{	v->to[i]--;
			} else	/* split the slot range around h */
			{	g = new_edge(v->dst[i]);/* same dst */
				g->From = v->from[i];
				g->To = h-1;	/* left half */
				v->from[i] = h+1;	/* right half */
				insert_edge(v, g);
			}
			goto part2;
		}
	for (e = v->Succ; e; f = e, e = e->Nxt)
	{	if (e->s == 1 && e->S == h)
		{	e->s = e->S = 0;
			goto rem_tst;
		}
		if (h >= e->From && h <= e->To)
		{	if (h == e->From)
			{	if (h == e->To)
				{	if (e->s)	/* fall back to the single extra symbol S */
					{	e->From = e->To = e->S;
						e->s = 0;
						break;
					} else
						goto rem_do;
				} else
					e->From++;
			} else if (h == e->To)
			{	e->To--;
			} else /* split */
			{	g = new_edge(e->Dst);	/* same dst */
				g->From = e->From;
				g->To = h-1;	/* g=left half */
				e->From = h+1;	/* e=right half */
				g->Nxt = e->Nxt;	/* insert g */
				e->Nxt = g;	/* behind e */
				break;	/* done */
			}
rem_tst:		if (e->From > e->To)
			{	if (e->s == 0)
				{
rem_do:					if (f)
						f->Nxt = e->Nxt;
					else
						v->Succ = e->Nxt;
					e->Nxt = (Edge *) 0;
					recyc_edges(e);
				} else
				{	e->From = e->To = e->S;
					e->s = 0;
			}	}
			break;
	}	}
part2:
	/* check if newdst is already there */
	for (i = 0; i < 2; i++)
		if (v->dst[i] == newdst)
		{	if (h+1 == (int) v->from[i])
			{	v->from[i] = h;	/* extend range downward */
				return;
			}
			if (h == (int) v->to[i]+1)
			{	v->to[i] = h;	/* extend range upward */
				return;
		}	}
	for (e = v->Succ; e; e = e->Nxt)
	{	if (e->Dst == newdst)
		{	if (h+1 == (int) e->From)
			{	e->From = h;
				if (e->s == 1 && e->S+1 == e->From)
				{	e->From = e->S;	/* absorb the extra symbol */
					e->s = e->S = 0;
				}
				return;
			}
			if (h == (int) e->To+1)
			{	e->To = h;
				if (e->s == 1 && e->S == e->To+1)
				{	e->To = e->S;
					e->s = e->S = 0;
				}
				return;
			}
			if (e->s == 0)	/* park h as the edge's single extra symbol */
			{	e->s = 1;
				e->S = h;
				return;
	}	}	}
	/* add as a new edge */
	e = new_edge(newdst);
	e->From = e->To = h;
	insert_edge(v, e);
}

/* Cheap layer-hash: sets Tally from the largest successor pointer and
 * returns the vertex's stored key (no rehash).
 */
static ulong
cheap_key(Vertex *v)
{	ulong vk2 = 0;

	if (v->dst[0])
	{	vk2 = (ulong) v->dst[0];
		if ((ulong) v->dst[1] > vk2)
			vk2 = (ulong) v->dst[1];
	} else if (v->dst[1])
		vk2 = (ulong) v->dst[1];
	if (v->Succ)
	{	Edge *e;
		for (e = v->Succ; e; e = e->Nxt)
			if ((ulong) e->Dst > vk2)
				vk2 = (ulong) e->Dst;
	}
	Tally = (vk2>>2)&(TWIDTH-1);
	return v->key;
}

/* Full hash of a vertex's outgoing transitions; also sets Tally. */
static ulong
mk_key(Vertex *v) /* not sensitive to order */
{	ulong m = 0, vk2 = 0;
	Edge *e;

	if (v->dst[0])
	{	m += HASH(v->dst[0], v->to[0] - v->from[0] + 1);
		vk2 = (ulong) v->dst[0];
	}
	if (v->dst[1])
	{	m += HASH(v->dst[1], v->to[1] - v->from[1] + 1);
		if ((ulong) v->dst[1] > vk2)
			vk2 = (ulong) v->dst[1];
	}
	for (e = v->Succ; e; e = e->Nxt)
	{	m += HASH(e->Dst, e->To - e->From + 1 + e->s);
		if ((ulong) e->Dst > vk2)
			vk2 = (ulong) e->Dst;
	}
	Tally = (vk2>>2)&(TWIDTH-1);
	return m;
}

/* Hash of v's transitions as they would look if symbol sigma were
 * redirected to n -- used to probe for an existing equivalent vertex
 * without actually modifying v.
 */
static ulong
mk_special(int sigma, Vertex *n, Vertex *v)
{	ulong m = 0, vk2 = 0;
	Edge *f;
	int i;

	for (i = 0; i < 2; i++)
		if (v->dst[i])
		{	if (sigma >= v->from[i] && sigma <= v->to[i])
			{	m += HASH(v->dst[i], v->to[i]-v->from[i]);	/* range minus sigma */
				if ((ulong) v->dst[i] > vk2
				&&   v->to[i] > v->from[i])
					vk2 = (ulong) v->dst[i];
			} else
			{	m += HASH(v->dst[i], v->to[i]-v->from[i]+1);
				if ((ulong) v->dst[i] > vk2)
					vk2 = (ulong) v->dst[i];
		}	}
	for (f = v->Succ; f; f = f->Nxt)
	{	if (sigma >= f->From && sigma <= f->To)
		{	m += HASH(f->Dst, f->To - f->From + f->s);
			if ((ulong) f->Dst > vk2
			&&   f->To - f->From + f->s > 0)
				vk2 = (ulong) f->Dst;
		} else if (f->s == 1 && sigma == f->S)
		{	m += HASH(f->Dst, f->To - f->From + 1);
			if ((ulong) f->Dst > vk2)
				vk2 = (ulong) f->Dst;
		} else
		{	m += HASH(f->Dst, f->To - f->From + 1 + f->s);
			if ((ulong) f->Dst > vk2)
				vk2 = (ulong) f->Dst;
	}	}
	if ((ulong) n > vk2)
		vk2 = (ulong) n;
	Tally = (vk2>>2)&(TWIDTH-1);
	m += HASH(n, 1);
	return m;
}

/* Initialize the minimized-automaton store with nr_layers layers
 * (one byte of the state vector per layer); builds the initial
 * chain R -> ... -> NF and the accepting vertex F.
 */
void
dfa_init(ushort nr_layers)
{	int i;
	Vertex *r, *t;

	dfa_depth = nr_layers;	/* one byte per layer */
	path   = (Vertex **) emalloc((dfa_depth+1)*sizeof(Vertex *));
	layers = (Vertex **) emalloc(TWIDTH*(dfa_depth+1)*sizeof(Vertex *));
	lastword = (uchar *) emalloc((dfa_depth+1)*sizeof(uchar));
	lastword[dfa_depth] = lastword[0] = 255;	/* sentinel: never matches a real byte */
	path[0] = R = new_vertex();
	F = new_vertex();

	for (i = 1, r = R; i < dfa_depth; i++, r = t)
		t = allDelta(r, i-1);
	NF = allDelta(r, i-1);
}
#if 0
static void complement_dfa(void) { Vertex *tmp = F; F = NF; NF = tmp; }
#endif

/* Count edges reachable from splay-tree node t (clearing keys as a
 * visited marker); helper for dfa_stats().
 */
double
tree_stats(Vertex *t)
{	Edge *e;
	double cnt=0.0;

	if (!t) return 0;
	if (!t->key) return 0;	/* already counted */
	t->key = 0; /* precaution */
	if (t->dst[0]) cnt++;
	if (t->dst[1]) cnt++;
	for (e = t->Succ; e; e = e->Nxt)
		cnt++;
	cnt += tree_stats(t->lnk);
	cnt += tree_stats(t->left);
	cnt += tree_stats(t->right);
	return cnt;
}

/* Report size of the minimized automaton. */
void
dfa_stats(void)
{	int i, j;
	double cnt = 0.0;

	for (j = 0; j < TWIDTH; j++)
	for (i = 0; i < dfa_depth+1; i++)
		cnt += tree_stats(layers[i*TWIDTH+j]);
	printf("Minimized Automaton:	%6lu nodes and %6g edges\n",
		nr_states, cnt);
}

/* Membership test: run the suffix of word[] from position n through
 * the automaton; true iff it ends in the accepting vertex F.
 */
int
dfa_member(ulong n)
{	Vertex **p, **q;
	uchar *w = &word[n];
	int i;

	p = &path[n]; q = (p+1);
	for (i = n; i < dfa_depth; i++)
		*q++ = Delta(*p++, *w++);
	return (*p == F);
}

/* Insert state vector sv into the minimized automaton; returns 1 if
 * it was already present, 0 if newly added.  Three phases: walk the
 * automaton (reusing the shared prefix with the last word), find the
 * longest reusable suffix, then clone/redirect vertices on the path
 * keeping the automaton minimal.
 */
int
dfa_store(uchar *sv)
{	Vertex **p, **q, *s, *y, *old, *new = F;
	uchar *w, *u = lastword;
	int i, j, k;

	w = word = sv;
	while (*w++ == *u++)	/* find first byte that differs */
		;
	pfrst = (int) (u - lastword) - 1;
	memcpy(&lastword[pfrst], &sv[pfrst], dfa_depth-pfrst);
	if (pfrst > iv) pfrst = iv;
	if (pfrst > nv) pfrst = nv;
/* phase1: */
	p = &path[pfrst]; q = (p+1); w = &word[pfrst];
	for (i = pfrst; i < dfa_depth; i++)
		*q++ = Delta(*p++, *w++);	/* (*p)->delta[*w++]; */
	if (*p == F) return 1;	/* it's already there */
/* phase2: */
	iv = dfa_depth;
	do {	iv--;
		old = new;
		new = find_it(path[iv], old, word[iv], iv);
	} while (new && iv > 0);
/* phase3: */
	nv = k = 0; s = path[0];
	for (j = 1; j <= iv; ++j)
		if (path[j]->num > 1)	/* shared vertex: clone before redirecting */
		{	y = new_vertex();
			copyEdges(y, path[j]);
			insert_it(y, j);
			numDelta(y, 1);
			delete_it(s, j-1);
			setDelta(s, word[j-1], y);
			insert_it(s, j-1);
			y->num = 1;	/* initial value 1 */
			s = y;
			path[j]->num--;	/* only 1 moved from j to y */
			k = 1;
		} else
		{	s = path[j];
			if (!k) nv = j;
		}
	y = Delta(s, word[iv]);
	y->num--;
	delete_it(s, iv);
	setDelta(s, word[iv], old);
	insert_it(s, iv);
	old->num++;

	for (j = iv+1; j < dfa_depth; j++)
		if (path[j]->num == 0)	/* vertex became unreachable */
		{	numDelta(path[j], -1);
			delete_it(path[j], j);
			recyc_vertex(path[j]);
		} else
			break;
	return 0;
}

/* Top-down splay of key i in tree t (Sleator/Tarjan); returns the
 * new root, with i at the root if present.
 */
static Vertex *
splay(ulong i, Vertex *t)
{	Vertex N, *l, *r, *y;

	if (!t) return t;
	N.left = N.right = (Vertex *) 0;
	l = r = &N;
	for (;;)
	{	if (i < t->key)
		{	if (!t->left) break;
			if (i < t->left->key)
			{	y = t->left;	/* rotate right */
				t->left = y->right;
				y->right = t;
				t = y;
				if (!t->left) break;
			}
			r->left = t;	/* link right */
			r = t;
			t = t->left;
		} else if (i > t->key)
		{	if (!t->right) break;
			if (i > t->right->key)
			{	y = t->right;	/* rotate left */
				t->right = y->left;
				y->left = t;
				t = y;
				if (!t->right) break;
			}
			l->right = t;	/* link left */
			l = t;
			t = t->right;
		} else
			break;
	}
	l->right = t->left;	/* assemble */
	r->left = t->right;
	t->left = N.right;
	t->right = N.left;
	return t;
}

/* Insert vertex v into the splay tree of layer L (bucket chosen by
 * mk_key/Tally); hash collisions are chained through v->lnk.
 */
static void
insert_it(Vertex *v, int L)
{	Vertex *new, *t;
	ulong i; int nr;

	i = mk_key(v);
	nr = ((L*TWIDTH)+Tally);
	t = layers[nr];

	v->key = i;
	if (!t)
	{	layers[nr] = v;
		return;
	}
	t = splay(i, t);
	if (i < t->key)
	{	new = v;
		new->left = t->left;
		new->right = t;
		t->left = (Vertex *) 0;
	} else if (i > t->key)
	{	new = v;
		new->right = t->right;
		new->left = t;
		t->right = (Vertex *) 0;
	} else	 /* it's already there */
	{	v->lnk = t->lnk; /* put in linked list off v */
		t->lnk = v;
		new = t;
	}
	layers[nr] = new;
}

/* Would candidate vertex h be equivalent to v with v's transition on
 * sigma redirected to n?  Compares every covered symbol; returns 1 on
 * a full match, 0 otherwise.
 */
static int
checkit(Vertex *h, Vertex *v, Vertex *n, uchar sigma)
{	Edge *g, *f;
	int i, k, j = 1;

	for (k = 0; k < 2; k++)
		if (h->dst[k])
		{	if (sigma >= h->from[k] && sigma <= h->to[k])
			{	if (h->dst[k] != n) goto no_match;
			}
			for (i = h->from[k]; i <= h->to[k]; i++)
			{	if (i == sigma) continue;
				g = cacheDelta(v, i, j); j = 0;
				if (h->dst[k] != g->Dst)
					goto no_match;
				if (g->s == 0 || g->S != i)
					i = g->To;	/* skip the rest of g's range */
			}
		}
	for (f = h->Succ; f; f = f->Nxt)
	{	if (INRANGE(f,sigma))
		{	if (f->Dst != n) goto no_match;
		}
		for (i = f->From; i <= f->To; i++)
		{	if (i == sigma) continue;
			g = cacheDelta(v, i, j); j = 0;
			if (f->Dst != g->Dst)
				goto no_match;
			if (g->s == 1 && i == g->S)
				continue;
			i = g->To;
		}
		if (f->s && f->S != sigma)
		{	g = cacheDelta(v, f->S, 1);
			if (f->Dst != g->Dst)
				goto no_match;
		}
	}
	if (h->Succ || h->dst[0] || h->dst[1]) return 1;
no_match:
	return 0;
}

/* Find an existing vertex in layer L equivalent to v-with-sigma
 * redirected-to-n; NULL when no equivalent vertex exists.
 */
static Vertex *
find_it(Vertex *v, Vertex *n, uchar sigma, int L)
{	Vertex *z, *t;
	ulong i; int nr;

	i = mk_special(sigma,n,v);
	nr = ((L*TWIDTH)+Tally);
	t = layers[nr];

	if (!t) return (Vertex *) 0;
	layers[nr] = t = splay(i, t);
	if (i == t->key)
	for (z = t; z; z = z->lnk)
		if (checkit(z, v, n, sigma))
			return z;

	return (Vertex *) 0;
}

/* Remove vertex v from the splay tree of layer L, unlinking it from
 * the collision chain or deleting the tree node as appropriate.
 */
static void
delete_it(Vertex *v, int L)
{	Vertex *x, *t;
	ulong i; int nr;

	i = cheap_key(v);
	nr = ((L*TWIDTH)+Tally);
	t = layers[nr];
	if (!t) return;

	t = splay(i, t);
	if (i == t->key)
	{	Vertex *z, *y = (Vertex *) 0;

		/* find v in the chain of t */
		for (z = t; z && z != v; y = z, z = z->lnk)
			;
		if (z != v) goto bad;

		if (y)	/* v is not the tree node itself */
		{	y->lnk = z->lnk;
			z->lnk = (Vertex *) 0;
			layers[nr] = t;
			return;
		} else if (z->lnk)	/* z == t == v */
		{	y = z->lnk;	/* promote next chain member to tree node */
			y->left = t->left;
			y->right = t->right;
			t->left = t->right = t->lnk = (Vertex *) 0;
			layers[nr] = y;
			return;
		}
		/* delete the node itself */
		if (!t->left)
		{	x = t->right;
		} else
		{	x = splay(i, t->left);
			x->right = t->right;
		}
		t->left = t->right = t->lnk = (Vertex *) 0;
		layers[nr] = x;
		return;
	}
bad:	Uerror("cannot happen delete");
}
#endif
#if defined(MA) && (defined(W_XPT) || defined(R_XPT))
static Vertex **temptree;
static char wbuf[4096];
static int WCNT = 4096, wcnt=0;
static uchar stacker[MA+1];
static ulong stackcnt = 0;
extern double nstates, nlinks, truncs, truncs2; static void xwrite(int fd, char *b, int n) { if (wcnt+n >= 4096) { write(fd, wbuf, wcnt); wcnt = 0; } memcpy(&wbuf[wcnt], b, n); wcnt += n; } static void wclose(int fd) { if (wcnt > 0) write(fd, wbuf, wcnt); wcnt = 0; close(fd); } static void w_vertex(int fd, Vertex *v) { char t[3]; int i; Edge *e; xwrite(fd, (char *) &v, sizeof(Vertex *)); t[0] = 0; for (i = 0; i < 2; i++) if (v->dst[i]) { t[1] = v->from[i], t[2] = v->to[i]; xwrite(fd, t, 3); xwrite(fd, (char *) &(v->dst[i]), sizeof(Vertex *)); } for (e = v->Succ; e; e = e->Nxt) { t[1] = e->From, t[2] = e->To; xwrite(fd, t, 3); xwrite(fd, (char *) &(e->Dst), sizeof(Vertex *)); if (e->s) { t[1] = t[2] = e->S; xwrite(fd, t, 3); xwrite(fd, (char *) &(e->Dst), sizeof(Vertex *)); } } } static void w_layer(int fd, Vertex *v) { uchar c=1; if (!v) return; xwrite(fd, (char *) &c, 1); w_vertex(fd, v); w_layer(fd, v->lnk); w_layer(fd, v->left); w_layer(fd, v->right); } void w_xpoint(void) { int fd; char nm[64]; int i, j; uchar c; static uchar xwarned = 0; sprintf(nm, "%s.xpt", PanSource); if ((fd = creat(nm, 0666)) <= 0) if (!xwarned) { xwarned = 1; printf("cannot creat checkpoint file\n"); return; } xwrite(fd, (char *) &nstates, sizeof(double)); xwrite(fd, (char *) &truncs, sizeof(double)); xwrite(fd, (char *) &truncs2, sizeof(double)); xwrite(fd, (char *) &nlinks, sizeof(double)); xwrite(fd, (char *) &dfa_depth, sizeof(int)); xwrite(fd, (char *) &R, sizeof(Vertex *)); xwrite(fd, (char *) &F, sizeof(Vertex *)); xwrite(fd, (char *) &NF, sizeof(Vertex *)); for (j = 0; j < TWIDTH; j++) for (i = 0; i < dfa_depth+1; i++) { w_layer(fd, layers[i*TWIDTH+j]); c = 2; xwrite(fd, (char *) &c, 1); } wclose(fd); } static void xread(int fd, char *b, int n) { int m = wcnt; int delta = 0; if (m < n) { if (m > 0) memcpy(b, &wbuf[WCNT-m], m); delta = m; WCNT = wcnt = read(fd, wbuf, 4096); if (wcnt < n-m) Uerror("xread failed -- insufficient data"); n -= m; } memcpy(&b[delta], &wbuf[WCNT-wcnt], 
n); wcnt -= n; } static void x_cleanup(Vertex *c) { Edge *e; /* remove the tree and edges from c */ if (!c) return; for (e = c->Succ; e; e = e->Nxt) x_cleanup(e->Dst); recyc_vertex(c); } static void x_remove(void) { Vertex *tmp; int i, s; int r, j; /* double-check: */ stacker[dfa_depth-1] = 0; r = dfa_store(stacker); stacker[dfa_depth-1] = 4; j = dfa_member(dfa_depth-1); if (r != 1 || j != 0) { printf("%lu: ", stackcnt); for (i = 0; i < dfa_depth; i++) printf("%d,", stacker[i]); printf(" -- not a stackstate \n", r, j); return; } stacker[dfa_depth-1] = 1; s = dfa_member(dfa_depth-1); { tmp = F; F = NF; NF = tmp; } /* complement */ if (s) dfa_store(stacker); stacker[dfa_depth-1] = 0; dfa_store(stacker); stackcnt++; { tmp = F; F = NF; NF = tmp; } } static void x_rm_stack(Vertex *t, int k) { int j; Edge *e; if (k == 0) { x_remove(); return; } if (t) for (e = t->Succ; e; e = e->Nxt) { for (j = e->From; j <= (int) e->To; j++) { stacker[k] = (uchar) j; x_rm_stack(e->Dst, k-1); } if (e->s) { stacker[k] = e->S; x_rm_stack(e->Dst, k-1); } } } static Vertex * insert_withkey(Vertex *v, int L) { Vertex *new, *t = temptree[L]; if (!t) { temptree[L] = v; return v; } t = splay(v->key, t); if (v->key < t->key) { new = v; new->left = t->left; new->right = t; t->left = (Vertex *) 0; } else if (v->key > t->key) { new = v; new->right = t->right; new->left = t; t->right = (Vertex *) 0; } else { if (t != R && t != F && t != NF) Uerror("double insert, bad checkpoint data"); else { recyc_vertex(v); new = t; } } temptree[L] = new; return new; } static Vertex * find_withkey(Vertex *v, int L) { Vertex *t = temptree[L]; if (t) { temptree[L] = t = splay((ulong) v, t); if (t->key == (ulong) v) return t; } Uerror("not found error, bad checkpoint data"); return (Vertex *) 0; } void r_layer(int fd, int n) { Vertex *v; Edge *e; char c, t[2]; for (;;) { xread(fd, &c, 1); if (c == 2) break; if (c == 1) { v = new_vertex(); xread(fd, (char *) &(v->key), sizeof(Vertex *)); v = insert_withkey(v, n); } 
else /* c == 0 */ { e = new_edge((Vertex *) 0); xread(fd, t, 2); e->From = t[0]; e->To = t[1]; xread(fd, (char *) &(e->Dst), sizeof(Vertex *)); insert_edge(v, e); } } } static void v_fix(Vertex *t, int nr) { int i; Edge *e; if (!t) return; for (i = 0; i < 2; i++) if (t->dst[i]) t->dst[i] = find_withkey(t->dst[i], nr); for (e = t->Succ; e; e = e->Nxt) e->Dst = find_withkey(e->Dst, nr); v_fix(t->left, nr); v_fix(t->right, nr); } static void v_insert(Vertex *t, int nr) { Edge *e; int i; if (!t) return; v_insert(t->left, nr); v_insert(t->right, nr); /* remove only leafs from temptree */ t->left = t->right = t->lnk = (Vertex *) 0; insert_it(t, nr); /* into layers */ for (i = 0; i < 2; i++) if (t->dst[i]) t->dst[i]->num += (t->to[i] - t->from[i] + 1); for (e = t->Succ; e; e = e->Nxt) e->Dst->num += (e->To - e->From + 1 + e->s); } static void x_fixup(void) { int i; for (i = 0; i < dfa_depth; i++) v_fix(temptree[i], (i+1)); for (i = dfa_depth; i >= 0; i--) v_insert(temptree[i], i); } static Vertex * x_tail(Vertex *t, ulong want) { int i, yes, no; Edge *e; Vertex *v = (Vertex *) 0; if (!t) return v; yes = no = 0; for (i = 0; i < 2; i++) if ((ulong) t->dst[i] == want) { /* was t->from[i] <= 0 && t->to[i] >= 0 */ /* but from and to are uchar */ if (t->from[i] == 0) yes = 1; else if (t->from[i] <= 4 && t->to[i] >= 4) no = 1; } for (e = t->Succ; e; e = e->Nxt) if ((ulong) e->Dst == want) { /* was INRANGE(e,0) but From and To are uchar */ if ((e->From == 0) || (e->s==1 && e->S==0)) yes = 1; else if (INRANGE(e, 4)) no = 1; } if (yes && !no) return t; v = x_tail(t->left, want); if (v) return v; v = x_tail(t->right, want); if (v) return v; return (Vertex *) 0; } static void x_anytail(Vertex *t, Vertex *c, int nr) { int i; Edge *e, *f; Vertex *v; if (!t) return; for (i = 0; i < 2; i++) if ((ulong) t->dst[i] == c->key) { v = new_vertex(); v->key = t->key; f = new_edge(v); f->From = t->from[i]; f->To = t->to[i]; f->Nxt = c->Succ; c->Succ = f; if (nr > 0) x_anytail(temptree[nr-1], v, 
nr-1); } for (e = t->Succ; e; e = e->Nxt) if ((ulong) e->Dst == c->key) { v = new_vertex(); v->key = t->key; f = new_edge(v); f->From = e->From; f->To = e->To; f->s = e->s; f->S = e->S; f->Nxt = c->Succ; c->Succ = f; x_anytail(temptree[nr-1], v, nr-1); } x_anytail(t->left, c, nr); x_anytail(t->right, c, nr); } static Vertex * x_cpy_rev(void) { Vertex *c, *v; /* find 0 and !4 predecessor of F */ v = x_tail(temptree[dfa_depth-1], F->key); if (!v) return (Vertex *) 0; c = new_vertex(); c->key = v->key; /* every node on dfa_depth-2 that has v->key as succ */ /* make copy and let c point to these (reversing ptrs) */ x_anytail(temptree[dfa_depth-2], c, dfa_depth-2); return c; } void r_xpoint(void) { int fd; char nm[64]; Vertex *d; int i, j; wcnt = 0; sprintf(nm, "%s.xpt", PanSource); if ((fd = open(nm, 0)) < 0) /* O_RDONLY */ Uerror("cannot open checkpoint file"); xread(fd, (char *) &nstates, sizeof(double)); xread(fd, (char *) &truncs, sizeof(double)); xread(fd, (char *) &truncs2, sizeof(double)); xread(fd, (char *) &nlinks, sizeof(double)); xread(fd, (char *) &dfa_depth, sizeof(int)); if (dfa_depth != MA+a_cycles) Uerror("bad dfa_depth in checkpoint file"); path = (Vertex **) emalloc((dfa_depth+1)*sizeof(Vertex *)); layers = (Vertex **) emalloc(TWIDTH*(dfa_depth+1)*sizeof(Vertex *)); temptree = (Vertex **) emalloc((dfa_depth+2)*sizeof(Vertex *)); lastword = (uchar *) emalloc((dfa_depth+1)*sizeof(uchar)); lastword[dfa_depth] = lastword[0] = 255; path[0] = R = new_vertex(); xread(fd, (char *) &R->key, sizeof(Vertex *)); R = insert_withkey(R, 0); F = new_vertex(); xread(fd, (char *) &F->key, sizeof(Vertex *)); F = insert_withkey(F, dfa_depth); NF = new_vertex(); xread(fd, (char *) &NF->key, sizeof(Vertex *)); NF = insert_withkey(NF, dfa_depth); for (j = 0; j < TWIDTH; j++) for (i = 0; i < dfa_depth+1; i++) r_layer(fd, i); if (wcnt != 0) Uerror("bad count in checkpoint file"); d = x_cpy_rev(); x_fixup(); stacker[dfa_depth-1] = 0; x_rm_stack(d, dfa_depth-2); x_cleanup(d); 
close(fd); printf("pan: removed %lu stackstates\n", stackcnt); nstates -= (double) stackcnt; } #endif void c_globals(void) { /* int i; */ printf("global vars:\n"); { int l_in; for (l_in = 0; l_in < 4; l_in++) { printf(" byte fork[%d]: %d\n", l_in, now.fork[l_in]); } } } void c_locals(int pid, int tp) { /* int i; */ switch(tp) { case 1: printf("local vars proc %d (:init:):\n", pid); printf(" byte k: %d\n", ((P1 *)pptr(pid))->k); break; case 0: printf("local vars proc %d (Philosopher):\n", pid); printf(" byte i: %d\n", ((P0 *)pptr(pid))->i); break; } } void printm(int x, char *s) { if (!s) { s = "_unnamed_"; } } void c_chandump(int unused) { unused++; /* avoid complaints */ } Trans *t_id_lkup[47]; #ifdef BFS_PAR #include "pan.p" #endif /* end of pan.c */