//#define REORG
#define TARGET_ARGS_DELETE
using System;
using System.Collections.Generic;
using System.Diagnostics;
using System.Linq;
using System.Threading;
using System.Runtime.InteropServices;
using miew.Debugging;
using miew.String;
using miew.Enumerable;
#if ILFUNC
using IlFunc;
#endif
#pragma warning disable 0169
namespace agree
{
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
/// <summary>
///
/// </summary>
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
[DebuggerDisplay("{ToString(),nq}")]
[StructLayout(LayoutKind.Explicit)]
public struct arr_tfs_entry
{
// 16-byte entry laid out as two overlapping 8-byte unions:
// bytes 0-7 hold the (feature, in-mark) key; 'fm_ul' aliases the whole key so lookups
// can compare i_feat and mark with a single ulong test (see TryGetEdge/GetUlEdge).
[FieldOffset(0)]
public ulong fm_ul;
[FieldOffset(0)]
public FeatMark fm;
[FieldOffset(0)]
public int i_feat;
[FieldOffset(4)]
public int mark;
// bytes 8-15 hold the target Edge, aliased as a raw ulong ('e_ul'), an Edge ('e'),
// or its (FlagsId, Mark) halves so either half can be read/written without the other.
[FieldOffset(8)]
public ulong e_ul;
[FieldOffset(8)]
public Edge e;
[FieldOffset(8)]
public Edge.Flag e_FlagsId; // 8
[FieldOffset(12)]
public int e_Mark; // 12
#if DEBUG
// Debug-only rendering. Falls back to the raw numeric fields whenever the grammar
// singleton, its type manager, or its feature table is unavailable (e.g. during
// bootstrap) or i_feat is out of range for the feature table.
public override string ToString()
{
Grammar g = Grammar._singleton;
TypeMgr tm;
FeatureInfo[] rgfi;
String f;
String nxt = "";// next == -1 ? "" : next.ToString("0000");
if (g == null || (tm = g.tm) == null || (rgfi = tm.feat_arr) == null || i_feat >= rgfi.Length || (f = rgfi[i_feat].feature) == null)
return String.Format("{0} {1} {2} {3}", i_feat, mark, e.ToString(), nxt);
String s = mark + "/" + rgfi[i_feat].feature.ToUpper();
return String.Format("{0,20} {1,30} {2}", s, e, nxt);
}
#endif
};
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
/// <summary>
/// ArrayTfs : low-impact, flat array edge storage
/// </summary>
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
public unsafe partial class ArrayTfs : Tfs
{
const int c_hashes = 256;
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
/// <summary>
/// construct an ArrayTfs from a BootstrapTfs
/// </summary>
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
unsafe public ArrayTfs(BootstrapTfs dtfs)
: base(dtfs.tm, dtfs.Edge)
{
int i, c_src = dtfs.d.Count;
this.entries = new arr_tfs_entry[c_src];
this.rg_next = new ushort[c_src];
this.hidx = new ushort[c_hashes];
fixed (ushort* _h = hidx)
fixed (arr_tfs_entry* _base = entries)
fixed (ushort* _rgn = rg_next)
{
// phase 1: flatten the bootstrap dictionary into the entry array. Each dictionary key
// packs the feature index in its high 32 bits and the in-mark in its low 32 bits.
arr_tfs_entry* pate = _base;
foreach (var kvp in dtfs.d)
{
pate->i_feat = (int)(kvp.Key >> 32);
pate->mark = (int)kvp.Key;
pate->e = kvp.Value;
pate++;
}
// phase 2: renumber coreference marks. An edge whose FlagsId is negative (sign bit =
// coreference flag) but whose out-mark is still positive gets a fresh negative mark,
// and every occurrence of the old mark -- both as an entry's in-mark and as another
// edge's out-mark -- is rewritten. O(n^2) over the entries; presumably acceptable at
// bootstrap sizes.
int c_corefs = 0;
Debug.Assert(next_coref_mark == -1);
pate = _base;
for (i = 0; i < entries.Length; i++)
{
int m_old = pate->e_Mark;
if (pate->e_FlagsId < 0 && m_old > 0)
{
int m_new = next_coref_mark--;
c_corefs++;
arr_tfs_entry* pate2 = _base;
for (int j = 0; j < entries.Length; j++)
{
if (pate2->mark == m_old)
pate2->mark = m_new;
if (pate2->e_Mark == m_old)
pate2->e_Mark = m_new;
pate2++;
}
}
pate++;
}
coref_tally_map = c_corefs == 0 ? Tfs.NoCorefsTallyMap : new byte[c_corefs];
// phase 3: tally coreference uses (negative mark m tallies in slot ~m) and thread the
// 256-bucket hash chains: bucket = low byte of (i_feat + mark); hidx stores 1-based
// entry indexes (0 = empty), rg_next links entries sharing a bucket. Note 'i' is
// incremented before the store, producing the 1-based index.
ushort* pn = _rgn;
pate = _base;
for (i = 0; i < entries.Length; )
{
int m = pate->e_Mark;
if (m < 0)
coref_tally_map[~m]++;
i++;
byte v = (byte)(pate->i_feat + pate->mark);
*pn++ = _h[v];
_h[v] = (ushort)i;
pate++;
}
}
this.next_mark = dtfs.next_mark;
}
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
/// <summary>
/// construct an ArrayTfs from a BootstrapTfs-style Edge dictionary and coreference overrides from a TargetTfs
/// </summary>
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
public unsafe ArrayTfs(Allocator ctrl, TargetTfs tt, int[] rg_feat_delete)
: base(ctrl.tm, tt.Edge)
{
Debug.Assert(tt.d.Count > 0);
#if TARGET_ARGS_DELETE
/// Optionally delete daughter features from a finished mother rule
if (rg_feat_delete != null)
{
foreach (int i_feat in rg_feat_delete)
if (tt._prune_below(i_feat, tt.Edge.Mark))
tt.SetEdge(i_feat, tt.Edge.Mark, Edge.PrunedEdge);
}
#endif
#if false
/// To detach scratch slots from the TFS nodes, agree's quasi-destructive unification essentially piggybacks
/// the arr_tfs_entry layout/indexing of this or any ArrayTfs to use as a pre-made mapping between Feat/Mark
/// tuples to its own scratch slot indexes. However, this requires that _some_ argument TFS be able to provide
/// a node which has a scratch slot for every (possible) appropriate feature of a type unification result.
/// This is seemingly guaranteed by the well-formedness check, but places an obscure requirement on the
/// storage of that GLB type's expanded TFS's topmost node (only). Because *top* is normally not stored, if
/// the expanded TFS has an unconstrained feature, there would be no scratch slot in the mapping. Therefore,
/// here we manually add *top* to fill-out the topmost node.
/// WARNING: Currently, this is only activated for ArrayTfs created from bootstrap/TargetTfs, because the QD
/// requirement only applies to TFSes introduced as well-formedness TFSes, which are always canonical
/// expanded TFSes, which are always created this way.
Debug.Assert(Edge.Mark == 1);
ushort* pfix = tm.rgpfix[(int)(Edge.FlagsId & Edge.Flag.MultiIdMask)];
ushort c_topmost_max = *pfix++;
for (ushort k = 0; k < c_topmost_max; k++, pfix++)
if (!tt.ContainsFeatMark(*pfix, 1))
tt.AddEdge(*pfix, 1, default(Edge));
#endif
// the high 32 bits of each coref-dictionary key identify this target TFS
long ttid = (long)tt.id << 32;
int c_src = tt.d.Count;
this.entries = new arr_tfs_entry[c_src];
this.rg_next = new ushort[c_src];
int i;
// identity map from the target's (positive) marks to this TFS's marks; slots are
// overwritten below as coreferenced out-marks are renumbered to fresh negative marks
int* mark_remap = stackalloc int[tt.next_mark];
for (i = 0; i < tt.next_mark; i++)
mark_remap[i] = i;
this.next_mark = tt.next_mark;
Debug.Assert(next_coref_mark == -1);
//this.next_coref_mark = tt.next_coref_mark;
//int min_coref_in_mark = 0;
coref_tally_map = new byte[tt.c_corefs];
this.hidx = new ushort[c_hashes];
fixed (ushort* _h = hidx)
fixed (arr_tfs_entry* _base = entries)
fixed (ushort* _rgn = rg_next)
{
arr_tfs_entry* pate = _base;
foreach (var kvp in tt.d)
{
pate->i_feat = (int)(kvp.Key >> 32);
pate->mark = (int)kvp.Key;
pate->e = kvp.Value;
if (pate->e_FlagsId < 0)
{
// coreferenced edge: chase the (possibly redirected) unification coref record
Unification.UnifCoref uc = tt.corefs[ttid | (uint)pate->e_Mark];
while (uc.f_redirect)
uc = uc.next;
Edge e = uc.e_cur;
if (uc.c_fm == 1)
{
// only one use remains: demote to a plain (non-coreferenced) edge; if
// nothing is left after clearing the flag, skip storing the entry entirely
// (the 'continue' reuses this slot for the next kvp)
Edge.Flag nf = e.FlagsId & ~Edge.Flag.Coreference;
#if DEBUG
uc.single_fm = default(FeatMark);
#endif
uc.c_fm = 0;
tt.c_corefs--; /// note: there will be a gap in the tally table
if (nf == 0)
continue;
e = new Edge(nf, (nf & Edge.Flag.EtmNonBareType) == 0 ? 0 : e.Mark);
}
else
{
// multiple uses: assign a fresh negative coref mark on first encounter and
// tally each use (negative mark m tallies in slot ~m)
if (e.Mark > 0)
uc.e_cur = e = new Edge(e.FlagsId, next_coref_mark--);
coref_tally_map[~e.Mark]++;
}
pate->e = e;
mark_remap[kvp.Value.Mark] = pate->e_Mark;
}
pate++;
}
tt.d = null;
tt.corefs = null;
// c can be less than c_src when the 'continue' above skipped a vacuous coreference
int c = (int)(pate - _base);
ushort* pn = _rgn;
pate = _base;
// remap in-marks and thread the 256-bucket hash chains (1-based indexes, 0 = empty)
for (i = 0; i < c; i++)
{
int m = pate->mark = mark_remap[pate->mark];
//if (m < min_coref_in_mark)
// min_coref_in_mark = m;
byte v = (byte)(pate->i_feat + m);
*pn++ = _h[v];
_h[v] = (ushort)(i + 1);
pate++;
}
}
Debug.Assert(tt.c_corefs == -1 - next_coref_mark);
Debug.Assert(mark_remap[this.Edge.Mark] == this.Edge.Mark);
/// (rare: 'continue' case above; coreferencing on *top* was vacuous and thus edge was not stored)
if (i < entries.Length && Grammar.f_garbage)
Array.Resize(ref entries, i);
// BuildNodeIndex(ctrl, i, next_mark, next_coref_mark, min_coref_in_mark, true);
}
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
/// <summary>
/// ArrayTfs : low-impact, flat array edge storage
/// </summary>
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
public unsafe ArrayTfs(Allocator ctrl, Edge e_top, arr_tfs_entry* _p_ate, int c_actual, int next_mark, int next_coref_mark, int min_coref_in_mark, bool f_restricted)
: base(ctrl.tm, e_top)
{
// Builds an ArrayTfs from 'c_actual' entries in an unmanaged scratch buffer: copies the
// entries, tallies coreference uses, and threads the 256-bucket hash chains in one pass.
// Optionally (NODE_INDEX) also builds a per-node feature index in allocator-owned memory.
Debug.Assert(c_actual > 0);
this.next_mark = next_mark;
this.next_coref_mark = next_coref_mark;
this.entries = new arr_tfs_entry[c_actual];
this.rg_next = new ushort[c_actual];
// next_coref_mark is the next unused (negative) mark, so -(next_coref_mark + 1) is the
// number of coref marks handed out so far
this.coref_tally_map = new byte[-(next_coref_mark + 1)];
// caller passes the most-negative coref in-mark; negate it to get a non-negative offset
// so in-marks index 0..c_mark_range-1 -- presumably min_coref_in_mark <= 0 on entry
min_coref_in_mark = -min_coref_in_mark;
int fpt = tm.fcm.MaxFeaturesPerType;
int c_mark_range = min_coref_in_mark + next_mark;
#if NODE_INDEX
int c_in_mark_usage = 0;
int* nie_feat_counts = stackalloc int[c_mark_range];
NodeIndexEntry* nie_base = stackalloc NodeIndexEntry[fpt * c_mark_range];
#endif
#if REORG
byte* counts = stackalloc byte[c_mark_range];
arr_tfs_entry** bymark = stackalloc arr_tfs_entry*[fpt * c_mark_range];
#endif
this.hidx = new ushort[c_hashes];
fixed (ushort* _h = hidx)
fixed (arr_tfs_entry* _base = entries)
fixed (byte* _cx = coref_tally_map)
fixed (ushort* _rgn = rg_next)
{
#if REORG
// REORG: bucket the source entries by (offset) in-mark instead of block-copying,
// so the copy below can lay out entries grouped by node
for (arr_tfs_entry* src = _p_ate; src < _p_ate + c_actual; src++)
{
if (src->mark < -min_coref_in_mark || src->mark >= next_mark)
throw new Exception();
int ient = src->mark + min_coref_in_mark;
int c_cur = counts[ient]++;
bymark[(ient * fpt) + c_cur] = src;
}
#elif ILFUNC
CopyMemory(_base, _p_ate, c_actual * sizeof(arr_tfs_entry));
#else
Kernel32.CopyMemory(_base, _p_ate, c_actual * sizeof(arr_tfs_entry));
#endif
#if REORG
//arr_tfs_entry[][] _dbg = new arr_tfs_entry[c_mark_range][];
//for (int j = 0; j < c_mark_range; j++)
//{
// if (counts[j] > 0)
// {
// _dbg[j] = new arr_tfs_entry[counts[j]];
// for (int k = 0; k < counts[j]; k++)
// _dbg[j][k] = *bymark[(j * fpt) + k];
// }
//}
#endif
// 'i' is the 1-based hash-chain index of the current entry (0 means empty/end of chain)
ushort i = 1;
ushort* pus;
ushort* pi_nxt = _rgn;
arr_tfs_entry* dst = _base, end = _base + c_actual;
// NOTE: the loop header is selected by conditional compilation; both variants share
// the same body below
#if REORG
for (int j = 0; j < c_mark_range; j++)
{
byte c = counts[j];
for (int k = 0; k < c; k++)
#else
{
while (dst < end)
#endif
{
if (dst->e_FlagsId == 0) // not easy to remove it at this point, so instead just don't hash it
Debug.Assert(dst->e_Mark == 0);
else
{
#if REORG
*dst = *bymark[(j * fpt) + k];
#endif
// tally a coreference use: negative out-mark m tallies in slot ~m
int m = dst->e_Mark;
if (m < 0)
_cx[~m]++;
m = dst->mark;
#if NODE_INDEX
// accumulate this node's (in-mark's) feature list for the node index
int ient = m + min_coref_in_mark;
int c_cur = nie_feat_counts[ient]++;
if (c_cur == 0)
c_in_mark_usage++;
NodeIndexEntry* _pnie = &nie_base[(ient * fpt) + c_cur];
_pnie->i_feat = (ushort)dst->i_feat;
_pnie->out_mark = (short)dst->e_Mark;
_pnie->f = dst->e_FlagsId;
#endif
// push onto the hash chain for bucket (byte)(i_feat + mark)
*pi_nxt = *(pus = _h + (byte)(dst->i_feat + m));
*pus = i;
}
i++;
pi_nxt++;
dst++;
}
}
}
#if NODE_INDEX
// pack the per-mark feature lists into a single allocator-owned NodeIndex blob; each
// list ends with a -1 sentinel, and marks with no features point just before a sentinel
int c_ni_alloc = NodeIndex.NodeSize(c_actual, c_mark_range, c_in_mark_usage);
p_node_index = (NodeIndex*)ctrl.Alloc(c_ni_alloc);
NodeIndex* pni = p_node_index;
pni->add_to_mark = min_coref_in_mark;
pni->c_limit = c_mark_range;
pni->c_entries = c_actual;
pni->c_in_mark_usage = c_in_mark_usage;
pni->c_mark_range = c_mark_range;
NodeIndexEntry** ppnie = (NodeIndexEntry**)(pni + 1);
NodeIndexEntry* pnie_dst = (NodeIndexEntry*)&ppnie[c_mark_range];
*(long*)pnie_dst = -1;
pnie_dst++;
for (int i = 0; i < c_mark_range; i++)
{
int c = nie_feat_counts[i];
if (c == 0)
*ppnie = pnie_dst - 1;
else
{
*ppnie = pnie_dst;
NodeIndexEntry* pnie_src = nie_base + (i * fpt);
for (int j = 0; j < c; j++)
*pnie_dst++ = *pnie_src++;
*(long*)pnie_dst = -1;
pnie_dst++;
}
ppnie++;
}
#endif
if (f_restricted)
{
coref_tally_map_restricted = coref_tally_map;
this.f_restricted = true;
}
}
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
/// <summary>
/// ArrayTfs : low-impact, flat array edge storage
/// </summary>
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
public unsafe ArrayTfs(
Allocator ctrl,
Edge e_top,
arr_tfs_entry[] entries,
ushort[] hidx,
ushort[] rgn,
int next_mark,
int next_coref_mark,
bool f_restricted)
: base(ctrl.tm, e_top)
{
// Adopts pre-built storage: the flat entry array, the 256 hash-bucket heads, and the
// per-entry chain links. No copying is performed.
this.next_mark = next_mark;
this.next_coref_mark = next_coref_mark;
this.entries = entries;
this.rg_next = rgn;
this.hidx = hidx;
this.f_restricted = f_restricted;
// next_coref_mark is the next unused (negative) mark, so -(next_coref_mark + 1) is the
// number of coreferences in use
this.coref_tally_map = new byte[-(next_coref_mark + 1)];
/// n-way needs coreference tallies
Config.Parser pcfg = ctrl.tm.config.parser;
if (pcfg.unifier == Config.Parser.UnifierType.n_way ||
pcfg.checker == Config.Parser.UnifierType.n_way ||
pcfg.f_variadic_unpack)
{
// Count the uses of each coreference: a negative out-mark m tallies in slot -m - 1.
// FIX: the previous do/while dereferenced the first entry before any bounds check;
// since 'fixed' over an empty array produces a null pointer, an empty 'entries'
// would null-deref. The guarded for-loop is simply a no-op in that case.
if (entries.Length > 0)
fixed (arr_tfs_entry* __pate = entries)
fixed (byte* _ctm = coref_tally_map)
{
for (arr_tfs_entry* pate = __pate, pate_end = __pate + entries.Length; pate < pate_end; pate++)
{
int m = pate->e_Mark;
if (m < 0)
_ctm[-m - 1]++;
}
}
if (f_restricted)
coref_tally_map_restricted = coref_tally_map;
}
}
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
/// <summary>
/// clone an ArrayTfs (or derived instance)
/// </summary>
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
public ArrayTfs(Tfs to_clone)
: base(to_clone.tm, to_clone.Edge)
{
// Shallow clone: all storage arrays are shared (not copied) with the source instance.
// FIX: the failure message previously formatted this.GetType().Name, which is always
// the type being constructed (ArrayTfs or a subclass) -- it now reports the type of
// the argument that could not be cloned, which is the diagnostically useful value.
ArrayTfs a = to_clone as ArrayTfs;
if (a == null)
throw new Exception(String.Format("ArrayTfs: don't know how to clone {0}", to_clone.GetType().Name));
/// ok to steal pointer to nodes because it's managed by ParseControl
#if NODE_INDEX
this.p_node_index = a.p_node_index;
#endif
this.entries = a.entries;
this.rg_next = a.rg_next;
this.coref_tally_map = a.coref_tally_map;
this.f_restricted = a.f_restricted;
this.hidx = a.hidx;
this.next_mark = a.next_mark;
this.next_coref_mark = a.next_coref_mark;
}
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
///
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// flat edge storage; a hash-chain value v refers to entries[v - 1]
public
arr_tfs_entry[] entries;
#if NODE_INDEX
// optional per-node feature index in unmanaged, allocator-owned memory
public NodeIndex* p_node_index;
#endif
// set from the constructors; exposed via IsRestricted
bool f_restricted = false;
// 256 hash-bucket heads keyed by the low byte of (i_feat + mark); 1-based, 0 = empty
public
ushort[] hidx;
// per-entry link to the next 1-based entry index in the same bucket; 0 ends the chain
public
ushort[] rg_next;
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
/// <summary>
///
/// </summary>
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Hash lookup of the edge stored under (i_feat, mark): bucket = low byte of
// (i_feat + mark); hidx/rg_next hold 1-based entry indexes (0 terminates the chain).
// The key test compares i_feat and mark in one shot by reading the entry's first
// 8 bytes as a ulong -- see the explicit layout of arr_tfs_entry.
// Returns false (and a default Edge) when no entry has this exact key.
public unsafe override bool TryGetEdge(int i_feat, int mark, out Edge e)
{
Debug.Assert(mark != 0);
int ix;
if ((ix = hidx[(byte)(i_feat + mark)]) != 0)
{
{
// build the packed key in the same layout as arr_tfs_entry bytes 0-7
ulong ul = ((ulong)mark << 32) | (uint)i_feat;
fixed (arr_tfs_entry* _pe = entries)
{
arr_tfs_entry* pe;
do
{
if (*(ulong*)(pe = _pe + ix - 1) == ul)
{
e = pe->e;
return true;
}
}
while ((ix = rg_next[ix - 1]) != 0);
}
}
}
e = default(Edge);
return false;
}
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
/// <summary>
///
/// </summary>
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
#if false
[ILFunc(@"
.locals init (
[0] int32 ix,
[1] valuetype agree.arr_tfs_entry& pinned _pe,
[2] valuetype agree.arr_tfs_entry* pe)
ldarg.0
ldfld int32[] agree.ArrayTfs::hidx
ldarg.1
ldarg.2
add // xor
conv.u1
ldelem.i4
dup
stloc.0
brfalse.s exit_not_found
ldarg.0
ldfld valuetype agree.arr_tfs_entry[] agree.ArrayTfs::entries
ldc.i4.0
ldelema agree.arr_tfs_entry
stloc.1
loop_start: ldloc.1 // pe = _pe + ix - 1
conv.u
ldloc.0 // ix--
ldc.i4.1
sub
dup
stloc.0
ldc.i4.4
shl
add
dup
stloc.2
ldind.i4 // if (*(uint*)pe == feat)
ldarg.1
bne.un.s IL_0067
ldloc.2 // if (((uint*)pe)[1] == mark)
ldc.i4.4
conv.u
add
ldind.i4
ldarg.2
bne.un.s IL_0067
ldarg.3 // *mark = pe->e.Mark
ldloc.2
ldc.i4 12
conv.u
add
ldind.i4
stind.i4
ldloc.2 // (return) pe->e.FlagsId
ldc.i4 8
conv.u
add
ldind.i4
ldc.i4.0 // release pin
conv.u
stloc.1
ret
IL_0067:
ldarg.0
ldfld int32[] agree.ArrayTfs::rg_next
ldloc.0
ldelem int32
dup
stloc.0
brtrue.s loop_start
ldc.i4.0 // release pin
conv.u
stloc.1
exit_not_found:
ldarg.3 // *mark = 0
ldc.i4.0
stind.i4
ldc.i4.0 // return 0
ret
")]
#endif
// Like TryGetEdge, but returns the stored edge's flags directly and outputs its
// out-mark, without materializing an Edge. Returns 0 flags and m == 0 when absent.
// Uses the same single-ulong key comparison as TryGetEdge (arr_tfs_entry layout).
public override Edge.Flag TryGetFlagsMark(int i_feat, int mark, out int m)
{
Debug.Assert(mark != 0);
int ix;
if ((ix = hidx[(byte)(i_feat + mark)]) != 0)
{
// packed key in the same layout as arr_tfs_entry bytes 0-7
ulong ul = ((ulong)mark << 32) | (uint)i_feat;
fixed (arr_tfs_entry* _pe = entries)
{
arr_tfs_entry* pe;
do
{
if (*(ulong*)(pe = _pe + ix - 1) == ul)
{
m = pe->e_Mark;
return pe->e_FlagsId;
}
}
while ((ix = rg_next[ix - 1]) != 0);
}
}
m = 0;
return 0;
}
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
/// <summary>
///
/// </summary>
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Looks up (i_feat, mark); on success rewrites 'mark' with the edge's out-mark and
// returns the entry's 1-based index. Returns 0 when the key is absent -- or when the
// edge was pruned during parsing, so a pruned edge is indistinguishable from a missing
// one to callers of this method.
public int GetEdgeIndex(int i_feat, ref int mark)
{
Debug.Assert(mark != 0);
int mix;
if ((mix = hidx[(byte)(i_feat + mark)]) != 0)
{
fixed (arr_tfs_entry* _pe = entries)
{
arr_tfs_entry* pe;
do
if ((pe = _pe + mix - 1)->i_feat == i_feat && pe->mark == mark)
{
if (pe->e_FlagsId == Edge.Flag.PrunedDuringParsing)
return 0;
mark = pe->e_Mark;
return mix;
}
while ((mix = rg_next[mix - 1]) != 0);
}
}
return 0;
}
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
/// <summary>
///
/// </summary>
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Returns the raw 8-byte Edge payload (arr_tfs_entry.e_ul) stored under the given
// FeatMark, or 0 when absent. Both the key comparison and the result read rely on the
// explicit layout of arr_tfs_entry: the entry's first ulong is the (feature, mark)
// key and the second ulong is the edge.
public override ulong GetUlEdge(FeatMark fm)
{
int ix;
if ((ix = hidx[(byte)(fm.i_feat + fm.m)]) != 0)
{
fixed (arr_tfs_entry* _pe = entries)
{
ulong* pul;
do
{
// reinterpret the FeatMark as its 8-byte key image for a one-shot compare
if (*(pul = (ulong*)(_pe + ix - 1)) == *(ulong*)&fm)
return *(pul + 1);
}
while ((ix = rg_next[ix - 1]) != 0);
}
}
return 0;
}
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
/// <summary>
///
/// </summary>
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Returns the edge stored under (i_feat, mark), or a default Edge when absent.
// Managed (non-pointer) variant of the hash-chain walk used by TryGetEdge.
public override Edge GetEdge(int i_feat, int mark)
{
Debug.Assert(mark != 0);
// chain values are 1-based entry indexes; 0 ends the chain
for (int cur = hidx[(byte)(i_feat + mark)]; cur != 0; cur = rg_next[cur - 1])
{
arr_tfs_entry ent = entries[cur - 1];
if (ent.i_feat == i_feat && ent.mark == mark)
return ent.e;
}
return default(Edge);
}
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
/// <summary>
///
/// </summary>
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// True iff an entry with this exact (feature, in-mark) key is stored.
public override bool ContainsFeatMark(int i_feat, int mark)
{
Debug.Assert(mark != 0);
// chain values are 1-based entry indexes; 0 ends the chain
for (int cur = hidx[(byte)(i_feat + mark)]; cur != 0; cur = rg_next[cur - 1])
{
arr_tfs_entry ent = entries[cur - 1];
if (ent.i_feat == i_feat && ent.mark == mark)
return true;
}
return false;
}
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
/// <summary>
///
/// </summary>
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Overwrites the edge stored under an existing (feature, in-mark) key. The key itself
// is never changed, so the hash chains remain valid. Throws if the key is not present:
// flat array storage is fixed-size, so new edges cannot be added here.
public override void SetEdge(int i_feat, int mark, Edge e)
{
Debug.Assert(mark != 0);
for (int cur = hidx[(byte)(i_feat + mark)]; cur != 0; cur = rg_next[cur - 1])
{
int j = cur - 1;
if (entries[j].i_feat == i_feat && entries[j].mark == mark)
{
// write the two halves of the edge in place
entries[j].e_FlagsId = e.FlagsId;
entries[j].e_Mark = e.Mark;
return;
}
}
throw new InvalidOperationException();
}
#if false
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
/// <summary>
/// Prune a sub-graph, and then remove any unused coreferences (and unmark the edge coreference bit) which
/// have only a single use remaining.
/// </summary>
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
public bool _prune_below(int i_feat, int m)
{
Edge e;
if (!TryGetEdge(i_feat, m, out e))
return false;
/// only remove *below* a coreferenced edge if it has no reentrancies remaining. This allows sub-graphs to
/// be pruned without disturbing the coreferenced portions that are still accessible from without.
if (e.FlagsId >= 0 || coref_tally_map[e.Mark] == 0)
{
if ((e.FlagsId & agree.Edge.Flag.EtmNonBareType) != 0)
foreach (int fix in tm.rgrgfix_by_type[(int)(e.FlagsId & Edge.Flag.MultiIdMask)])
_prune_below(fix, e.Mark);
}
/// remove the edge
_remove_edge(i_feat, m);
return true;
}
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
/// <summary>
///
/// </summary>
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
public void _remove_edge(int i_feat, int mark)
{
Debug.Assert(mark != 0);
uint v;
int ix = hidx[v = (uint)((i_feat << 16) ^ mark) % c_hashes];
if (ix != -1)
{
arr_tfs_entry ent;
int prv = -1;
if (p_entries != null)
{
do
{
ent = p_entries[ix];
if (ent.mark == mark && ent.i_feat == i_feat)
{
Debugger.Break();
if (ent.e.FlagsId < 0 && --coref_tally_map[ent.e.Mark] == 0)
c_corefs--;
if (prv == -1)
hidx[v] = ent.next;
else
p_entries[prv].next = ent.next;
p_entries[ix] = default(arr_tfs_entry);
return;
}
prv = ix;
}
while ((ix = ent.next) != -1);
}
else
{
do
{
ent = entries[ix];
if (ent.mark == mark && ent.i_feat == i_feat)
{
Debugger.Break();
if (ent.e.FlagsId < 0 && --coref_tally_map[ent.e.Mark] == 0)
c_corefs--;
if (prv == -1)
hidx[v] = ent.next;
else
entries[prv].next = ent.next;
entries[ix] = default(arr_tfs_entry);
return;
}
prv = ix;
}
while ((ix = ent.next) != -1);
}
}
return;
}
#endif
// true when this instance was constructed from restricted data (see the constructors)
public override bool IsRestricted { get { return f_restricted; } }
public override int EdgeCount { get { return entries.Length; } }
// scratch capacity requested by the unifier: one slot per stored edge
public override int ScratchAlloc { get { return entries.Length; } }
// cloning shares storage (see the clone constructor); when coref_tally_map is null the
// instance itself is returned -- presumably no per-clone state exists in that case
public override Tfs Clone() { return coref_tally_map == null ? this : new ArrayTfs(this); }
// flat array storage is fixed-size: edges can be overwritten (SetEdge) but not added or removed
public override bool RemoveEdge(int i_feat, int mark) { throw new InvalidOperationException(); }
public override void AddEdge(int i_feat, int mark, Edge e) { throw new InvalidOperationException(); }
// True iff any entry's in-mark equals 'mark'. Linear scan: the hash index is keyed by
// (feature + mark), so a mark-only membership test cannot use it.
public override bool _ContainsInMark(int mark)
{
foreach (arr_tfs_entry ent in entries)
if (ent.mark == mark)
return true;
return false;
}
//public override IEnumerable<arr_tfs_entry> _fme
//{
// get
// {
// for (int i = 0; i < entries.Length; i++)
// yield return p_entries[i];
// }
//}
[DebuggerBrowsable(DebuggerBrowsableState.Never)]
public override IEnumerable<FeatMarkEdge> FeatMarkEdges
{
// lazily projects each stored entry to its (feature, in-mark, edge) triple
get
{
return this.entries.Select(ate => new FeatMarkEdge(ate.i_feat, ate.mark, ate.e));
}
}
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
/// <summary>
///
/// </summary>
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// P/Invoke shim for a raw memory copy.
// FIX: kernel32.dll does not export a function named 'CopyMemory' -- in the Win32
// headers CopyMemory is a macro for RtlMoveMemory, which kernel32 does export. Without
// an explicit EntryPoint the first call throws EntryPointNotFoundException.
static unsafe class Kernel32
{
[DllImport("kernel32.dll", EntryPoint = "RtlMoveMemory")]
public static extern void CopyMemory(void* dest, void* src, int length);
}
#if ILFUNC
[ILFunc(@"
ldarg.0
ldarg.1
ldarg.2
cpblk
ret
")]
#endif
// Raw memory copy of 'length' bytes from src to dest. When ILFUNC is enabled, the
// attribute presumably substitutes the inline 'cpblk' IL for this managed body (see the
// IlFunc library); otherwise the call falls through to the Win32 P/Invoke above.
public static void CopyMemory(void* dest, void* src, int length)
{
Kernel32.CopyMemory(dest, src, length);
}
};
}