Add comprehensive centrality analysis to slipnet study

Key finding: Eccentricity is the only metric significantly correlated
with conceptual depth (r=-0.380, p=0.029). Local centrality measures
(degree, betweenness, closeness) show no significant correlation.

New files:
- compute_centrality.py: Computes 8 graph metrics
- centrality_comparison.png: Visual comparison of all metrics
- Updated paper with full analysis

Co-Authored-By: Claude Opus 4.5 <noreply@anthropic.com>
This commit is contained in:
Alex Linhares
2026-02-01 21:17:02 +00:00
parent 50b6fbdc27
commit 72d0bf3d3e
9 changed files with 621 additions and 531 deletions

Binary file not shown.

After

Width:  |  Height:  |  Size: 262 KiB

View File

@ -0,0 +1,86 @@
{
"analysis_type": "centrality_correlation",
"n_nodes": 33,
"metrics": [
{
"name": "Eccentricity",
"key": "eccentricity",
"pearson_r": -0.3796,
"pearson_p": 0.029349,
"spearman_r": -0.2988,
"spearman_p": 0.091181,
"r_squared": 0.1441,
"significant": true
},
{
"name": "Closeness Centrality",
"key": "closeness",
"pearson_r": -0.2699,
"pearson_p": 0.128801,
"spearman_r": -0.1804,
"spearman_p": 0.315126,
"r_squared": 0.0728,
"significant": false
},
{
"name": "Degree Centrality",
"key": "degree",
"pearson_r": -0.2643,
"pearson_p": 0.137147,
"spearman_r": -0.2362,
"spearman_p": 0.18565,
"r_squared": 0.0699,
"significant": false
},
{
"name": "PageRank",
"key": "pagerank",
"pearson_r": -0.257,
"pearson_p": 0.148771,
"spearman_r": -0.1908,
"spearman_p": 0.287516,
"r_squared": 0.0661,
"significant": false
},
{
"name": "Clustering Coefficient",
"key": "clustering",
"pearson_r": -0.2191,
"pearson_p": 0.2205,
"spearman_r": -0.2761,
"spearman_p": 0.119934,
"r_squared": 0.048,
"significant": false
},
{
"name": "Betweenness Centrality",
"key": "betweenness",
"pearson_r": -0.1716,
"pearson_p": 0.339655,
"spearman_r": -0.0801,
"spearman_p": 0.657858,
"r_squared": 0.0294,
"significant": false
},
{
"name": "Eigenvector Centrality",
"key": "eigenvector",
"pearson_r": -0.1482,
"pearson_p": 0.410425,
"spearman_r": -0.2367,
"spearman_p": 0.184728,
"r_squared": 0.022,
"significant": false
},
{
"name": "Avg Neighbor Degree",
"key": "avg_neighbor_degree",
"pearson_r": 0.0517,
"pearson_p": 0.775231,
"spearman_r": -0.3006,
"spearman_p": 0.089172,
"r_squared": 0.0027,
"significant": false
}
]
}

View File

@ -0,0 +1,283 @@
"""
Compute various centrality and graph metrics for slipnet nodes.
Compare correlations with conceptual depth.
"""
import json
import numpy as np
import networkx as nx
from scipy import stats
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
def load_slipnet(filepath):
    """Parse and return the slipnet JSON document stored at *filepath*."""
    with open(filepath, 'r') as handle:
        return json.loads(handle.read())
def build_graph(data):
    """Build an undirected networkx graph from the slipnet JSON dict.

    Each entry of data['nodes'] becomes a graph node carrying its
    'conceptualDepth' as a 'depth' attribute; each entry of data['links']
    becomes an undirected edge between source and destination names.
    """
    graph = nx.Graph()
    graph.add_nodes_from(
        (entry['name'], {'depth': entry['conceptualDepth']})
        for entry in data['nodes']
    )
    graph.add_edges_from(
        (link['source'], link['destination'])
        for link in data['links']
    )
    return graph
def get_letter_nodes():
    """Return the set of the 26 lowercase letter node names, 'a' through 'z'."""
    return set('abcdefghijklmnopqrstuvwxyz')
def compute_all_metrics(G):
    """Compute eight centrality/structural metrics for every node of *G*.

    Returns a dict mapping metric key (e.g. 'degree', 'eccentricity') to a
    per-node {name: value} dict.  Metrics: degree, betweenness, closeness,
    eigenvector, pagerank, clustering, avg_neighbor_degree, eccentricity.
    """
    metrics = {}
    metrics['degree'] = nx.degree_centrality(G)
    metrics['betweenness'] = nx.betweenness_centrality(G)
    metrics['closeness'] = nx.closeness_centrality(G)
    # Eigenvector centrality can fail to converge on disconnected graphs;
    # fall back to the largest component and score all other nodes 0.
    try:
        metrics['eigenvector'] = nx.eigenvector_centrality(G, max_iter=1000)
    except nx.PowerIterationFailedConvergence:
        giant = max(nx.connected_components(G), key=len)
        eig = nx.eigenvector_centrality(G.subgraph(giant), max_iter=1000)
        metrics['eigenvector'] = {node: eig.get(node, 0.0) for node in G.nodes()}
    metrics['pagerank'] = nx.pagerank(G)
    metrics['clustering'] = nx.clustering(G)
    metrics['avg_neighbor_degree'] = nx.average_neighbor_degree(G)
    # Eccentricity is undefined across components, so compute it per
    # connected component and merge the results.
    eccentricities = {}
    for component in nx.connected_components(G):
        eccentricities.update(nx.eccentricity(G.subgraph(component)))
    # Safety net kept from the original: any node somehow missing an
    # eccentricity gets (max observed) + 1.  In practice every node belongs
    # to some component, so this loop assigns nothing.
    ceiling = max(eccentricities.values()) if eccentricities else 0
    for node in G.nodes():
        if node not in eccentricities:
            eccentricities[node] = ceiling + 1
    metrics['eccentricity'] = eccentricities
    return metrics
def main():
    """Run the full centrality-vs-depth analysis on the slipnet.

    Loads the slipnet JSON, builds the graph, computes eight graph metrics,
    correlates each against conceptual depth for the non-letter nodes, prints
    summary tables, and writes two PNG figures plus a JSON results file.
    All input/output paths are hard-coded absolute Windows paths below.
    """
    # NOTE(review): absolute machine-specific path — assumes this exact layout.
    filepath = r'C:\Users\alexa\copycat\slipnet_analysis\slipnet.json'
    data = load_slipnet(filepath)
    print(f"Loaded slipnet with {data['nodeCount']} nodes and {data['linkCount']} links")
    # Build graph
    G = build_graph(data)
    print(f"Built graph with {G.number_of_nodes()} nodes and {G.number_of_edges()} edges")
    # Get letter nodes
    letter_nodes = get_letter_nodes()
    # Compute all metrics
    print("\nComputing centrality metrics...")
    metrics = compute_all_metrics(G)
    # Extract non-letter nodes with their depths
    # (letter nodes a–z are excluded from the correlation analysis)
    names = []
    depths = []
    for node in data['nodes']:
        if node['name'] not in letter_nodes:
            names.append(node['name'])
            depths.append(node['conceptualDepth'])
    depths = np.array(depths)
    # Compute correlations for each metric
    print("\n" + "=" * 80)
    print("CORRELATION ANALYSIS: Conceptual Depth vs Graph Metrics")
    print("=" * 80)
    results = []
    # Maps internal metric keys to the human-readable labels used in output.
    metric_names = {
        'degree': 'Degree Centrality',
        'betweenness': 'Betweenness Centrality',
        'closeness': 'Closeness Centrality',
        'eigenvector': 'Eigenvector Centrality',
        'pagerank': 'PageRank',
        'clustering': 'Clustering Coefficient',
        'avg_neighbor_degree': 'Avg Neighbor Degree',
        'eccentricity': 'Eccentricity'
    }
    for metric_key, metric_label in metric_names.items():
        metric_values = np.array([metrics[metric_key][n] for n in names])
        # Skip if all values are the same (no variance)
        if np.std(metric_values) == 0:
            print(f"\n{metric_label}: No variance, skipping")
            continue
        # Compute correlations
        pearson_r, pearson_p = stats.pearsonr(depths, metric_values)
        spearman_r, spearman_p = stats.spearmanr(depths, metric_values)
        # R-squared
        # (computed from the degree-1 least-squares fit of metric on depth)
        z = np.polyfit(depths, metric_values, 1)
        y_pred = np.polyval(z, depths)
        ss_res = np.sum((metric_values - y_pred) ** 2)
        ss_tot = np.sum((metric_values - np.mean(metric_values)) ** 2)
        r_squared = 1 - (ss_res / ss_tot) if ss_tot > 0 else 0
        # 'values' keeps the raw per-node metric array for plotting below.
        results.append({
            'metric': metric_label,
            'key': metric_key,
            'pearson_r': pearson_r,
            'pearson_p': pearson_p,
            'spearman_r': spearman_r,
            'spearman_p': spearman_p,
            'r_squared': r_squared,
            'slope': z[0],
            'intercept': z[1],
            'values': metric_values
        })
        print(f"\n{metric_label}:")
        print(f" Pearson r = {pearson_r:.4f} (p = {pearson_p:.6f})")
        print(f" Spearman rho = {spearman_r:.4f} (p = {spearman_p:.6f})")
        print(f" R-squared = {r_squared:.4f}")
    # Sort by absolute Pearson correlation
    results.sort(key=lambda x: abs(x['pearson_r']), reverse=True)
    print("\n" + "=" * 80)
    print("SUMMARY: Metrics ranked by |Pearson r|")
    print("=" * 80)
    print(f"{'Metric':<25} {'Pearson r':>12} {'p-value':>12} {'Spearman':>12} {'R-squared':>12}")
    print("-" * 75)
    for r in results:
        # '*' marks significance at the conventional p < 0.05 threshold.
        sig = "*" if r['pearson_p'] < 0.05 else " "
        print(f"{r['metric']:<25} {r['pearson_r']:>11.4f}{sig} {r['pearson_p']:>12.6f} {r['spearman_r']:>12.4f} {r['r_squared']:>12.4f}")
    print("\n* = statistically significant at p < 0.05")
    # Create comparison plot (2x4 grid)
    fig, axes = plt.subplots(2, 4, figsize=(16, 8))
    axes = axes.flatten()
    for idx, r in enumerate(results):
        if idx >= 8:
            break
        ax = axes[idx]
        # Add jitter for visibility
        # (unseeded np.random — the plot differs slightly run to run)
        jitter = np.random.normal(0, 0.02 * np.std(r['values']), len(r['values']))
        ax.scatter(depths, r['values'] + jitter, alpha=0.7, s=60, c='steelblue', edgecolors='navy')
        # Trend line
        x_line = np.linspace(min(depths), max(depths), 100)
        y_line = r['slope'] * x_line + r['intercept']
        ax.plot(x_line, y_line, 'r--', alpha=0.8)
        ax.set_xlabel('Conceptual Depth', fontsize=10)
        ax.set_ylabel(r['metric'], fontsize=10)
        sig_marker = "*" if r['pearson_p'] < 0.05 else ""
        ax.set_title(f"r = {r['pearson_r']:.3f}{sig_marker}, R² = {r['r_squared']:.3f}", fontsize=10)
        ax.grid(True, alpha=0.3)
    # Hide unused subplots
    for idx in range(len(results), 8):
        axes[idx].set_visible(False)
    plt.suptitle('Conceptual Depth vs Graph Metrics (n=33 non-letter nodes)', fontsize=12, y=1.02)
    plt.tight_layout()
    plt.savefig(r'C:\Users\alexa\copycat\slipnet_analysis\centrality_comparison.png', dpi=150, bbox_inches='tight')
    print(f"\nComparison plot saved to: centrality_comparison.png")
    # Create individual detailed plots for top 4 metrics
    # (results is already sorted by |Pearson r|, so [:4] is the top 4)
    fig2, axes2 = plt.subplots(2, 2, figsize=(12, 10))
    axes2 = axes2.flatten()
    for idx, r in enumerate(results[:4]):
        ax = axes2[idx]
        jitter = np.random.normal(0, 0.02 * np.std(r['values']), len(r['values']))
        ax.scatter(depths, r['values'] + jitter, alpha=0.7, s=80, c='steelblue', edgecolors='navy')
        # Add labels
        # (each point annotated with its node name for the detailed view)
        for i, name in enumerate(names):
            ax.annotate(name, (depths[i], r['values'][i] + jitter[i]),
                        fontsize=7, alpha=0.7, xytext=(3, 3), textcoords='offset points')
        # Trend line
        x_line = np.linspace(min(depths), max(depths), 100)
        y_line = r['slope'] * x_line + r['intercept']
        ax.plot(x_line, y_line, 'r--', alpha=0.8,
                label=f'y = {r["slope"]:.4f}x + {r["intercept"]:.4f}')
        ax.set_xlabel('Conceptual Depth', fontsize=11)
        ax.set_ylabel(r['metric'], fontsize=11)
        sig_text = " (significant)" if r['pearson_p'] < 0.05 else " (not significant)"
        ax.set_title(f"{r['metric']}\nPearson r = {r['pearson_r']:.3f} (p = {r['pearson_p']:.4f}){sig_text}",
                     fontsize=11)
        ax.legend(loc='best', fontsize=9)
        ax.grid(True, alpha=0.3)
    plt.suptitle('Top 4 Metrics: Conceptual Depth Correlations', fontsize=13)
    plt.tight_layout()
    plt.savefig(r'C:\Users\alexa\copycat\slipnet_analysis\top_metrics_detailed.png', dpi=150, bbox_inches='tight')
    print(f"Detailed plot saved to: top_metrics_detailed.png")
    # Save results to JSON for paper
    output_data = {
        'analysis_type': 'centrality_correlation',
        'n_nodes': len(names),
        'metrics': []
    }
    for r in results:
        # Rounded copies only; the raw 'values' arrays are not serialized.
        output_data['metrics'].append({
            'name': r['metric'],
            'key': r['key'],
            'pearson_r': round(r['pearson_r'], 4),
            'pearson_p': round(r['pearson_p'], 6),
            'spearman_r': round(r['spearman_r'], 4),
            'spearman_p': round(r['spearman_p'], 6),
            'r_squared': round(r['r_squared'], 4),
            'significant': bool(r['pearson_p'] < 0.05)
        })
    with open(r'C:\Users\alexa\copycat\slipnet_analysis\centrality_results.json', 'w') as f:
        json.dump(output_data, f, indent=2)
    print(f"Results saved to: centrality_results.json")
    # Print data table for paper
    print("\n" + "=" * 80)
    print("DATA TABLE FOR PAPER")
    print("=" * 80)
    print(f"{'Node':<25} {'Depth':>6} {'Degree':>8} {'Between':>8} {'Close':>8} {'Eigen':>8} {'PageRank':>8}")
    print("-" * 80)
    # Rows sorted by ascending conceptual depth.
    sorted_nodes = sorted(zip(names, depths), key=lambda x: x[1])
    for name, depth in sorted_nodes:
        deg = metrics['degree'][name]
        bet = metrics['betweenness'][name]
        clo = metrics['closeness'][name]
        eig = metrics['eigenvector'][name]
        pr = metrics['pagerank'][name]
        print(f"{name:<25} {depth:>6.0f} {deg:>8.4f} {bet:>8.4f} {clo:>8.4f} {eig:>8.4f} {pr:>8.4f}")
# Script entry point: run the full analysis only when executed directly.
if __name__ == '__main__':
    main()

View File

@ -4,43 +4,38 @@
\providecommand\HyField@AuxAddToCoFields[2]{}
\citation{mitchell1993,hofstadter1995}
\@writefile{toc}{\contentsline {section}{\numberline {1}Introduction}{1}{section.1}\protected@file@percent }
\@writefile{toc}{\contentsline {subsection}{\numberline {1.1}The Slipnet Architecture}{1}{subsection.1.1}\protected@file@percent }
\@writefile{toc}{\contentsline {subsection}{\numberline {1.2}Conceptual Depth}{2}{subsection.1.2}\protected@file@percent }
\@writefile{toc}{\contentsline {subsection}{\numberline {1.3}Research Question}{2}{subsection.1.3}\protected@file@percent }
\@writefile{toc}{\contentsline {section}{\numberline {2}Methods}{2}{section.2}\protected@file@percent }
\@writefile{toc}{\contentsline {subsection}{\numberline {2.1}Data Extraction}{2}{subsection.2.1}\protected@file@percent }
\@writefile{toc}{\contentsline {subsection}{\numberline {2.2}Graph Construction}{2}{subsection.2.2}\protected@file@percent }
\@writefile{toc}{\contentsline {subsection}{\numberline {2.3}Hop Count Computation}{2}{subsection.2.3}\protected@file@percent }
\@writefile{toc}{\contentsline {subsection}{\numberline {2.4}Handling Unreachable Nodes}{2}{subsection.2.4}\protected@file@percent }
\@writefile{toc}{\contentsline {subsection}{\numberline {2.5}Statistical Analysis}{3}{subsection.2.5}\protected@file@percent }
\@writefile{toc}{\contentsline {section}{\numberline {3}Results}{3}{section.3}\protected@file@percent }
\@writefile{toc}{\contentsline {subsection}{\numberline {3.1}Network Connectivity}{3}{subsection.3.1}\protected@file@percent }
\@writefile{toc}{\contentsline {subsection}{\numberline {3.2}Hop Distribution}{3}{subsection.3.2}\protected@file@percent }
\@writefile{lot}{\contentsline {table}{\numberline {1}{\ignorespaces Distribution of minimum hops to letter nodes}}{3}{table.caption.2}\protected@file@percent }
\@writefile{toc}{\contentsline {subsection}{\numberline {1.1}The Slipnet}{1}{subsection.1.1}\protected@file@percent }
\@writefile{toc}{\contentsline {subsection}{\numberline {1.2}Research Questions}{1}{subsection.1.2}\protected@file@percent }
\@writefile{toc}{\contentsline {section}{\numberline {2}Methods}{1}{section.2}\protected@file@percent }
\@writefile{toc}{\contentsline {subsection}{\numberline {2.1}Graph Construction}{1}{subsection.2.1}\protected@file@percent }
\@writefile{toc}{\contentsline {subsection}{\numberline {2.2}Metrics Computed}{1}{subsection.2.2}\protected@file@percent }
\@writefile{toc}{\contentsline {subsection}{\numberline {2.3}Statistical Analysis}{2}{subsection.2.3}\protected@file@percent }
\@writefile{toc}{\contentsline {section}{\numberline {3}Results}{2}{section.3}\protected@file@percent }
\@writefile{toc}{\contentsline {subsection}{\numberline {3.1}Correlation Summary}{2}{subsection.3.1}\protected@file@percent }
\@writefile{lot}{\contentsline {table}{\numberline {1}{\ignorespaces Correlations with conceptual depth (n=33)}}{2}{table.caption.2}\protected@file@percent }
\providecommand*\caption@xref[2]{\@setref\relax\@undefined{#1}}
\newlabel{tab:hops}{{1}{3}{Distribution of minimum hops to letter nodes}{table.caption.2}{}}
\@writefile{toc}{\contentsline {subsection}{\numberline {3.3}Descriptive Statistics}{3}{subsection.3.3}\protected@file@percent }
\@writefile{lot}{\contentsline {table}{\numberline {2}{\ignorespaces Descriptive statistics for analyzed nodes (n=33)}}{3}{table.caption.3}\protected@file@percent }
\newlabel{tab:descriptive}{{2}{3}{Descriptive statistics for analyzed nodes (n=33)}{table.caption.3}{}}
\@writefile{toc}{\contentsline {subsection}{\numberline {3.4}Correlation Analysis}{3}{subsection.3.4}\protected@file@percent }
\@writefile{toc}{\contentsline {subsection}{\numberline {3.5}Visualization}{4}{subsection.3.5}\protected@file@percent }
\@writefile{lof}{\contentsline {figure}{\numberline {1}{\ignorespaces Scatter plot of conceptual depth versus minimum hops to nearest letter node. Reachable nodes (blue) and unreachable nodes (red, assigned hops=$2 \times 4 = 8$) are distinguished. Points are jittered vertically for visibility. The dashed line shows the linear regression fit.}}{4}{figure.caption.4}\protected@file@percent }
\newlabel{fig:scatter}{{1}{4}{Scatter plot of conceptual depth versus minimum hops to nearest letter node. Reachable nodes (blue) and unreachable nodes (red, assigned hops=$2 \times 4 = 8$) are distinguished. Points are jittered vertically for visibility. The dashed line shows the linear regression fit}{figure.caption.4}{}}
\@writefile{toc}{\contentsline {subsection}{\numberline {3.6}Counterexamples}{4}{subsection.3.6}\protected@file@percent }
\@writefile{toc}{\contentsline {section}{\numberline {4}Discussion}{4}{section.4}\protected@file@percent }
\@writefile{toc}{\contentsline {subsection}{\numberline {4.1}Orthogonal Design Dimensions}{4}{subsection.4.1}\protected@file@percent }
\@writefile{toc}{\contentsline {subsection}{\numberline {4.2}The Disconnected Cluster}{5}{subsection.4.2}\protected@file@percent }
\@writefile{toc}{\contentsline {subsection}{\numberline {4.3}Hub Structure}{5}{subsection.4.3}\protected@file@percent }
\@writefile{toc}{\contentsline {subsection}{\numberline {4.4}Implications}{5}{subsection.4.4}\protected@file@percent }
\@writefile{toc}{\contentsline {subsection}{\numberline {4.5}Limitations}{5}{subsection.4.5}\protected@file@percent }
\@writefile{toc}{\contentsline {section}{\numberline {5}Conclusion}{5}{section.5}\protected@file@percent }
\@writefile{toc}{\contentsline {section}{\numberline {A}Complete Data}{6}{appendix.A}\protected@file@percent }
\@writefile{lot}{\contentsline {table}{\numberline {3}{\ignorespaces All analyzed nodes sorted by hop count, then depth}}{6}{table.caption.6}\protected@file@percent }
\newlabel{tab:complete}{{3}{6}{All analyzed nodes sorted by hop count, then depth}{table.caption.6}{}}
\@writefile{toc}{\contentsline {section}{\numberline {B}Link Type Distribution}{6}{appendix.B}\protected@file@percent }
\newlabel{tab:correlations}{{1}{2}{Correlations with conceptual depth (n=33)}{table.caption.2}{}}
\@writefile{toc}{\contentsline {subsection}{\numberline {3.2}Visualization}{2}{subsection.3.2}\protected@file@percent }
\@writefile{lof}{\contentsline {figure}{\numberline {1}{\ignorespaces Conceptual depth vs eight graph metrics. Only eccentricity (*) shows significant correlation.}}{2}{figure.caption.3}\protected@file@percent }
\newlabel{fig:comparison}{{1}{2}{Conceptual depth vs eight graph metrics. Only eccentricity (*) shows significant correlation}{figure.caption.3}{}}
\@writefile{toc}{\contentsline {subsection}{\numberline {3.3}Hop Distance Analysis}{2}{subsection.3.3}\protected@file@percent }
\@writefile{toc}{\contentsline {subsection}{\numberline {3.4}Eccentricity: The Significant Finding}{2}{subsection.3.4}\protected@file@percent }
\@writefile{lot}{\contentsline {table}{\numberline {2}{\ignorespaces Eccentricity examples}}{2}{table.caption.4}\protected@file@percent }
\newlabel{tab:eccentricity}{{2}{2}{Eccentricity examples}{table.caption.4}{}}
\@writefile{toc}{\contentsline {subsection}{\numberline {3.5}Non-Significant Centralities}{3}{subsection.3.5}\protected@file@percent }
\@writefile{toc}{\contentsline {section}{\numberline {4}Discussion}{3}{section.4}\protected@file@percent }
\@writefile{toc}{\contentsline {subsection}{\numberline {4.1}Eccentricity as Global Position}{3}{subsection.4.1}\protected@file@percent }
\@writefile{toc}{\contentsline {subsection}{\numberline {4.2}Local vs Global Structure}{3}{subsection.4.2}\protected@file@percent }
\@writefile{toc}{\contentsline {subsection}{\numberline {4.3}Design Implications}{3}{subsection.4.3}\protected@file@percent }
\@writefile{toc}{\contentsline {subsection}{\numberline {4.4}Limitations}{3}{subsection.4.4}\protected@file@percent }
\@writefile{toc}{\contentsline {section}{\numberline {5}Conclusion}{3}{section.5}\protected@file@percent }
\bibcite{mitchell1993}{{1}{}{{}}{{}}}
\bibcite{hofstadter1995}{{2}{}{{}}{{}}}
\providecommand\NAT@force@numbers{}\NAT@force@numbers
\@writefile{lot}{\contentsline {table}{\numberline {4}{\ignorespaces Slipnet link type distribution}}{7}{table.caption.7}\protected@file@percent }
\newlabel{tab:links}{{4}{7}{Slipnet link type distribution}{table.caption.7}{}}
\gdef \@abspage@last{7}
\@writefile{toc}{\contentsline {section}{\numberline {A}Complete Correlation Data}{4}{appendix.A}\protected@file@percent }
\@writefile{lot}{\contentsline {table}{\numberline {3}{\ignorespaces Full correlation statistics}}{4}{table.caption.6}\protected@file@percent }
\newlabel{tab:full}{{3}{4}{Full correlation statistics}{table.caption.6}{}}
\@writefile{toc}{\contentsline {section}{\numberline {B}Node Data Sample}{4}{appendix.B}\protected@file@percent }
\@writefile{lot}{\contentsline {table}{\numberline {4}{\ignorespaces Selected nodes with metrics}}{4}{table.caption.7}\protected@file@percent }
\newlabel{tab:nodes}{{4}{4}{Selected nodes with metrics}{table.caption.7}{}}
\gdef \@abspage@last{4}

View File

@ -1,4 +1,4 @@
This is pdfTeX, Version 3.141592653-2.6-1.40.28 (MiKTeX 25.12) (preloaded format=pdflatex 2026.1.28) 1 FEB 2026 19:58
This is pdfTeX, Version 3.141592653-2.6-1.40.28 (MiKTeX 25.12) (preloaded format=pdflatex 2026.1.28) 1 FEB 2026 21:16
entering extended mode
restricted \write18 enabled.
%&-line parsing enabled.
@ -366,7 +366,7 @@ See the natbib package documentation for explanation.
Type H <return> for immediate help.
...
l.43 ...mand\NAT@force@numbers{}\NAT@force@numbers
l.34 ...mand\NAT@force@numbers{}\NAT@force@numbers
Check the bibliography entries for non-compliant syntax,
or select author-year BibTeX style, e.g. plainnat
@ -432,15 +432,15 @@ Package hyperref Info: Link coloring OFF on input line 24.
* layout: <same size as paper>
* layoutoffset:(h,v)=(0.0pt,0.0pt)
* modes:
* h-part:(L,W,R)=(72.26999pt, 469.75502pt, 72.26999pt)
* v-part:(T,H,B)=(72.26999pt, 650.43001pt, 72.26999pt)
* h-part:(L,W,R)=(65.04256pt, 484.20988pt, 65.04256pt)
* v-part:(T,H,B)=(65.04256pt, 664.88487pt, 65.04256pt)
* \paperwidth=614.295pt
* \paperheight=794.96999pt
* \textwidth=469.75502pt
* \textheight=650.43001pt
* \oddsidemargin=0.0pt
* \evensidemargin=0.0pt
* \topmargin=-37.0pt
* \textwidth=484.20988pt
* \textheight=664.88487pt
* \oddsidemargin=-7.22743pt
* \evensidemargin=-7.22743pt
* \topmargin=-44.22743pt
* \headheight=12.0pt
* \headsep=25.0pt
* \topskip=11.0pt
@ -471,207 +471,44 @@ LaTeX Font Info: Trying to load font information for U+msb on input line 27.
(C:\Users\alexa\AppData\Local\Programs\MiKTeX\tex/latex/amsfonts\umsb.fd
File: umsb.fd 2013/01/14 v3.01 AMS symbols B
)
Underfull \hbox (badness 10000) in paragraph at lines 43--44
[]\T1/cmr/bx/n/10.95 Positional con-cepts\T1/cmr/m/n/10.95 : \T1/cmtt/m/n/10.95
leftmost\T1/cmr/m/n/10.95 ,
[]
Underfull \vbox (badness 6284) has occurred while \output is active []
Underfull \hbox (badness 10000) in paragraph at lines 44--45
[]\T1/cmr/bx/n/10.95 Relational con-cepts\T1/cmr/m/n/10.95 : \T1/cmtt/m/n/10.95
successor\T1/cmr/m/n/10.95 ,
[]
Underfull \hbox (badness 2617) in paragraph at lines 45--46
[]\T1/cmr/bx/n/10.95 Category con-cepts\T1/cmr/m/n/10.95 : \T1/cmtt/m/n/10.95 l
etterCategory\T1/cmr/m/n/10.95 ,
Underfull \hbox (badness 2662) in paragraph at lines 46--47
[]\T1/cmr/m/n/10.95 Global cen-tral-ity (be-tween-ness, close-ness,
[]
[1{C:/Users/alexa/AppData/Local/MiKTeX/fonts/map/pdftex/pdftex.map}
]
Underfull \hbox (badness 10000) in paragraph at lines 62--63
[]\T1/cmtt/m/n/10.95 stringPositionCategory\T1/cmr/m/n/10.95 ,
Underfull \hbox (badness 1303) in paragraph at lines 83--83
[]\T1/cmr/m/n/10.95 Table 1: |Cor-re-la-tions with con-cep-tual depth
[]
<centrality_comparison.png, id=116, 1148.1294pt x 592.1322pt>
File: centrality_comparison.png Graphic file (type png)
<use centrality_comparison.png>
Package pdftex.def Info: centrality_comparison.png used on input line 114.
(pdftex.def) Requested size: 237.10493pt x 122.28236pt.
Underfull \hbox (badness 10000) in paragraph at lines 64--65
[]\T1/cmtt/m/n/10.95 opposite\T1/cmr/m/n/10.95 , \T1/cmtt/m/n/10.95 identity\T1
/cmr/m/n/10.95 , \T1/cmtt/m/n/10.95 bondFacet\T1/cmr/m/n/10.95 ,
Underfull \hbox (badness 10000) in paragraph at lines 123--124
[]\T1/cmr/m/n/10.95 Counterexamples abound: \T1/cmtt/m/n/10.95 bondFacet
[]
Underfull \hbox (badness 6332) in paragraph at lines 64--65
\T1/cmtt/m/n/10.95 objectCategory\T1/cmr/m/n/10.95 : depth = 90 (most
[]
[2]
Underfull \hbox (badness 10000) in paragraph at lines 142--143
[]\T1/cmtt/m/n/10.95 letterCategory\T1/cmr/m/n/10.95 , \T1/cmtt/m/n/10.95 first
\T1/cmr/m/n/10.95 ,
[]
Underfull \hbox (badness 10000) in paragraph at lines 143--144
[]\T1/cmtt/m/n/10.95 leftmost\T1/cmr/m/n/10.95 , \T1/cmtt/m/n/10.95 length\T1/c
mr/m/n/10.95 ,
[]
Underfull \hbox (badness 1668) in paragraph at lines 144--145
[]\T1/cmr/m/n/10.95 Numbers 1--5, \T1/cmtt/m/n/10.95 sameness\T1/cmr/m/n/10.95
,
[]
Underfull \hbox (badness 10000) in paragraph at lines 145--146
[]\T1/cmtt/m/n/10.95 bondCategory\T1/cmr/m/n/10.95 ,
[]
Underfull \hbox (badness 10000) in paragraph at lines 146--147
[]\T1/cmtt/m/n/10.95 identity\T1/cmr/m/n/10.95 , \T1/cmtt/m/n/10.95 opposite\T1
/cmr/m/n/10.95 ,
[]
[3]
<depth_hops_correlation.png, id=151, 722.7pt x 578.16pt>
File: depth_hops_correlation.png Graphic file (type png)
<use depth_hops_correlation.png>
Package pdftex.def Info: depth_hops_correlation.png used on input line 194.
(pdftex.def) Requested size: 229.8775pt x 183.9034pt.
Underfull \hbox (badness 2809) in paragraph at lines 204--205
[]\T1/cmr/bx/n/10.95 High depth, few hops\T1/cmr/m/n/10.95 : \T1/cmtt/m/n/10.95
bondFacet
[]
Underfull \hbox (badness 1939) in paragraph at lines 204--205
\T1/cmr/m/n/10.95 and \T1/cmtt/m/n/10.95 alphabeticPositionCategory \T1/cmr/m/n
/10.95 (both
[]
Underfull \hbox (badness 5924) in paragraph at lines 208--209
[]\T1/cmr/bx/n/10.95 Same depth, dif-fer-ent hops\T1/cmr/m/n/10.95 : At
[]
Underfull \hbox (badness 10000) in paragraph at lines 208--209
\T1/cmr/m/n/10.95 depth=90, \T1/cmtt/m/n/10.95 bondFacet \T1/cmr/m/n/10.95 need
s only
[]
Underfull \hbox (badness 10000) in paragraph at lines 208--209
\T1/cmr/m/n/10.95 2 hops while \T1/cmtt/m/n/10.95 identity\T1/cmr/m/n/10.95 , \
T1/cmtt/m/n/10.95 opposite\T1/cmr/m/n/10.95 ,
[]
Underfull \hbox (badness 10000) in paragraph at lines 208--209
\T1/cmr/m/n/10.95 and \T1/cmtt/m/n/10.95 objectCategory \T1/cmr/m/n/10.95 are c
om-pletely
[]
Underfull \hbox (badness 3219) in paragraph at lines 222--223
\T1/cmr/m/n/10.95 cepts can ac-ti-vate each other through
[]
Underfull \hbox (badness 1540) in paragraph at lines 222--223
\T1/cmr/m/n/10.95 by an edge can di-rectly in-flu-ence each
[]
[4 <./depth_hops_correlation.png>]
Underfull \hbox (badness 7099) in paragraph at lines 236--237
\T1/cmr/m/n/10.95 object-type hi-er-ar-chy. They clas-sify
[]
Underfull \hbox (badness 3396) in paragraph at lines 239--240
[]\T1/cmr/m/n/10.95 Notably, the \T1/cmtt/m/n/10.95 letter \T1/cmr/m/n/10.95 co
n-cept (depth=20,
[]
Underfull \hbox (badness 10000) in paragraph at lines 239--240
\T1/cmr/m/n/10.95 rel-a-tively con-crete) is dis-con-nected while
[]
Underfull \hbox (badness 1478) in paragraph at lines 239--240
\T1/cmtt/m/n/10.95 letterCategory \T1/cmr/m/n/10.95 (depth=30) is di-rectly con
[2 <./centrality_comparison.png>]
Underfull \hbox (badness 1117) in paragraph at lines 215--216
[]\T1/cmtt/m/n/10.95 centrality_results.json\T1/cmr/m/n/10.95 : Nu-mer-i-cal re
-
[]
[3]
Underfull \hbox (badness 1502) in paragraph at lines 216--217
[]\T1/cmtt/m/n/10.95 centrality_comparison.png\T1/cmr/m/n/10.95 : Com-par-i-son
Underfull \hbox (badness 6559) in paragraph at lines 239--240
\T1/cmr/m/n/10.95 ``letter-as-type'' and ``letter-as-category'' fur-
[]
Underfull \hbox (badness 3525) in paragraph at lines 272--273
[]\T1/cmr/bx/n/10.95 Penalty as-sign-ment\T1/cmr/m/n/10.95 : The choice of
[]
Underfull \hbox (badness 2042) in paragraph at lines 274--275
[]\T1/cmr/bx/n/10.95 Undirected as-sump-tion\T1/cmr/m/n/10.95 : We treated
[]
[5]
Underfull \hbox (badness 2285) in paragraph at lines 289--290
[]\T1/cmtt/m/n/10.95 slipnet.json\T1/cmr/m/n/10.95 : Com-plete net-work with
[]
Underfull \hbox (badness 10000) in paragraph at lines 291--292
[]\T1/cmtt/m/n/10.95 plot_depth_distance_correlation.py\T1/cmr/m/n/10.95 :
[]
Overfull \hbox (31.95923pt too wide) in paragraph at lines 306--349
[][]
[]
Underfull \vbox (badness 10000) has occurred while \output is active []
Underfull \hbox (badness 10000) in paragraph at lines 363--364
[]\T1/cmr/m/n/10.95 Lateral as-so-ci-a-tions
[]
Underfull \hbox (badness 1590) in paragraph at lines 363--364
\T1/cmr/m/n/10.95 that don't al-low con-
[]
Underfull \hbox (badness 1308) in paragraph at lines 364--365
[]\T1/cmr/m/n/10.95 Upward hi-er-ar-chy (in-
[]
Underfull \hbox (badness 10000) in paragraph at lines 365--366
[]\T1/cmr/m/n/10.95 Downward hi-er-ar-chy
[]
Underfull \hbox (badness 10000) in paragraph at lines 367--368
[]\T1/cmr/m/n/10.95 Intrinsic at-tributes
[]
Underfull \vbox (badness 10000) has occurred while \output is active []
[6]
[7
[4
] (slipnet_depth_analysis.aux)
***********
@ -680,40 +517,40 @@ L3 programming layer <2025-12-29>
***********
Package rerunfilecheck Info: File `slipnet_depth_analysis.out' has not changed.
(rerunfilecheck) Checksum: EE379051615A964053B9116F5B823DA0;3632.
(rerunfilecheck) Checksum: E004728272BA2383E766BFD309DA0B11;3052.
)
Here is how much of TeX's memory you used:
12369 strings out of 467871
192910 string characters out of 5418376
617452 words of memory out of 5000000
41044 multiletter control sequences out of 15000+600000
644966 words of font info for 86 fonts, out of 8000000 for 9000
12358 strings out of 467871
192776 string characters out of 5418376
613190 words of memory out of 5000000
41046 multiletter control sequences out of 15000+600000
647276 words of font info for 93 fonts, out of 8000000 for 9000
1141 hyphenation exceptions out of 8191
75i,11n,79p,1103b,511s stack positions out of 10000i,1000n,20000p,200000b,200000s
75i,9n,79p,1060b,515s stack positions out of 10000i,1000n,20000p,200000b,200000s
<C:\Users\alexa\AppData\Local\MiKTeX\fonts/pk/ljfour/jknappen/ec/dpi600\ecrm
1000.pk> <C:\Users\alexa\AppData\Local\MiKTeX\fonts/pk/ljfour/jknappen/ec/dpi60
0\ecrm0600.pk> <C:\Users\alexa\AppData\Local\MiKTeX\fonts/pk/ljfour/jknappen/ec
/dpi600\ecrm0800.pk> <C:\Users\alexa\AppData\Local\MiKTeX\fonts/pk/ljfour/jknap
0900.pk> <C:\Users\alexa\AppData\Local\MiKTeX\fonts/pk/ljfour/jknappen/ec/dpi60
0\ecrm1000.pk> <C:\Users\alexa\AppData\Local\MiKTeX\fonts/pk/ljfour/jknappen/ec
/dpi600\tcrm1095.pk> <C:\Users\alexa\AppData\Local\MiKTeX\fonts/pk/ljfour/jknap
pen/ec/dpi600\ectt1095.pk> <C:\Users\alexa\AppData\Local\MiKTeX\fonts/pk/ljfour
/jknappen/ec/dpi600\ecbx1095.pk> <C:\Users\alexa\AppData\Local\MiKTeX\fonts/pk/
ljfour/jknappen/ec/dpi600\tcrm1095.pk> <C:\Users\alexa\AppData\Local\MiKTeX\fon
ts/pk/ljfour/jknappen/ec/dpi600\ecti1095.pk> <C:\Users\alexa\AppData\Local\MiKT
eX\fonts/pk/ljfour/jknappen/ec/dpi600\ecbx1200.pk> <C:\Users\alexa\AppData\Loca
l\MiKTeX\fonts/pk/ljfour/jknappen/ec/dpi600\ecrm1095.pk> <C:\Users\alexa\AppDat
a\Local\MiKTeX\fonts/pk/ljfour/jknappen/ec/dpi600\ecbx1440.pk> <C:\Users\alexa\
AppData\Local\MiKTeX\fonts/pk/ljfour/jknappen/ec/dpi600\ectt1200.pk> <C:\Users\
alexa\AppData\Local\MiKTeX\fonts/pk/ljfour/jknappen/ec/dpi600\ecrm1200.pk> <C:\
Users\alexa\AppData\Local\MiKTeX\fonts/pk/ljfour/jknappen/ec/dpi600\ecrm1728.pk
><C:/Users/alexa/AppData/Local/Programs/MiKTeX/fonts/type1/public/amsfonts/cm/c
mmi10.pfb><C:/Users/alexa/AppData/Local/Programs/MiKTeX/fonts/type1/public/amsf
onts/cm/cmmi8.pfb><C:/Users/alexa/AppData/Local/Programs/MiKTeX/fonts/type1/pub
lic/amsfonts/cm/cmr10.pfb><C:/Users/alexa/AppData/Local/Programs/MiKTeX/fonts/t
ype1/public/amsfonts/cm/cmr8.pfb><C:/Users/alexa/AppData/Local/Programs/MiKTeX/
fonts/type1/public/amsfonts/cm/cmsy10.pfb><C:/Users/alexa/AppData/Local/Program
s/MiKTeX/fonts/type1/public/amsfonts/cm/cmsy8.pfb>
Output written on slipnet_depth_analysis.pdf (7 pages, 353154 bytes).
/jknappen/ec/dpi600\ecbx1200.pk> <C:\Users\alexa\AppData\Local\MiKTeX\fonts/pk/
ljfour/jknappen/ec/dpi600\ecti1095.pk> <C:\Users\alexa\AppData\Local\MiKTeX\fon
ts/pk/ljfour/jknappen/ec/dpi600\ecbx1095.pk> <C:\Users\alexa\AppData\Local\MiKT
eX\fonts/pk/ljfour/jknappen/ec/dpi600\ecrm1095.pk> <C:\Users\alexa\AppData\Loca
l\MiKTeX\fonts/pk/ljfour/jknappen/ec/dpi600\ecbx1440.pk> <C:\Users\alexa\AppDat
a\Local\MiKTeX\fonts/pk/ljfour/jknappen/ec/dpi600\ectt1200.pk> <C:\Users\alexa\
AppData\Local\MiKTeX\fonts/pk/ljfour/jknappen/ec/dpi600\ecrm1200.pk> <C:\Users\
alexa\AppData\Local\MiKTeX\fonts/pk/ljfour/jknappen/ec/dpi600\ecrm1728.pk><C:/U
sers/alexa/AppData/Local/Programs/MiKTeX/fonts/type1/public/amsfonts/cm/cmmi10.
pfb><C:/Users/alexa/AppData/Local/Programs/MiKTeX/fonts/type1/public/amsfonts/c
m/cmmi9.pfb><C:/Users/alexa/AppData/Local/Programs/MiKTeX/fonts/type1/public/am
sfonts/cm/cmr10.pfb><C:/Users/alexa/AppData/Local/Programs/MiKTeX/fonts/type1/p
ublic/amsfonts/cm/cmr7.pfb><C:/Users/alexa/AppData/Local/Programs/MiKTeX/fonts/
type1/public/amsfonts/cm/cmr8.pfb><C:/Users/alexa/AppData/Local/Programs/MiKTeX
/fonts/type1/public/amsfonts/cm/cmr9.pfb><C:/Users/alexa/AppData/Local/Programs
/MiKTeX/fonts/type1/public/amsfonts/cm/cmsy10.pfb>
Output written on slipnet_depth_analysis.pdf (4 pages, 432492 bytes).
PDF statistics:
645 PDF objects out of 1000 (max. 8388607)
61 named destinations out of 1000 (max. 500000)
214 words of extra memory for PDF output out of 10000 (max. 10000000)
589 PDF objects out of 1000 (max. 8388607)
43 named destinations out of 1000 (max. 500000)
174 words of extra memory for PDF output out of 10000 (max. 10000000)

View File

@ -1,26 +1,21 @@
\BOOKMARK [1][-]{section.1}{\376\377\000I\000n\000t\000r\000o\000d\000u\000c\000t\000i\000o\000n}{}% 1
\BOOKMARK [2][-]{subsection.1.1}{\376\377\000T\000h\000e\000\040\000S\000l\000i\000p\000n\000e\000t\000\040\000A\000r\000c\000h\000i\000t\000e\000c\000t\000u\000r\000e}{section.1}% 2
\BOOKMARK [2][-]{subsection.1.2}{\376\377\000C\000o\000n\000c\000e\000p\000t\000u\000a\000l\000\040\000D\000e\000p\000t\000h}{section.1}% 3
\BOOKMARK [2][-]{subsection.1.3}{\376\377\000R\000e\000s\000e\000a\000r\000c\000h\000\040\000Q\000u\000e\000s\000t\000i\000o\000n}{section.1}% 4
\BOOKMARK [1][-]{section.2}{\376\377\000M\000e\000t\000h\000o\000d\000s}{}% 5
\BOOKMARK [2][-]{subsection.2.1}{\376\377\000D\000a\000t\000a\000\040\000E\000x\000t\000r\000a\000c\000t\000i\000o\000n}{section.2}% 6
\BOOKMARK [2][-]{subsection.2.2}{\376\377\000G\000r\000a\000p\000h\000\040\000C\000o\000n\000s\000t\000r\000u\000c\000t\000i\000o\000n}{section.2}% 7
\BOOKMARK [2][-]{subsection.2.3}{\376\377\000H\000o\000p\000\040\000C\000o\000u\000n\000t\000\040\000C\000o\000m\000p\000u\000t\000a\000t\000i\000o\000n}{section.2}% 8
\BOOKMARK [2][-]{subsection.2.4}{\376\377\000H\000a\000n\000d\000l\000i\000n\000g\000\040\000U\000n\000r\000e\000a\000c\000h\000a\000b\000l\000e\000\040\000N\000o\000d\000e\000s}{section.2}% 9
\BOOKMARK [2][-]{subsection.2.5}{\376\377\000S\000t\000a\000t\000i\000s\000t\000i\000c\000a\000l\000\040\000A\000n\000a\000l\000y\000s\000i\000s}{section.2}% 10
\BOOKMARK [1][-]{section.3}{\376\377\000R\000e\000s\000u\000l\000t\000s}{}% 11
\BOOKMARK [2][-]{subsection.3.1}{\376\377\000N\000e\000t\000w\000o\000r\000k\000\040\000C\000o\000n\000n\000e\000c\000t\000i\000v\000i\000t\000y}{section.3}% 12
\BOOKMARK [2][-]{subsection.3.2}{\376\377\000H\000o\000p\000\040\000D\000i\000s\000t\000r\000i\000b\000u\000t\000i\000o\000n}{section.3}% 13
\BOOKMARK [2][-]{subsection.3.3}{\376\377\000D\000e\000s\000c\000r\000i\000p\000t\000i\000v\000e\000\040\000S\000t\000a\000t\000i\000s\000t\000i\000c\000s}{section.3}% 14
\BOOKMARK [2][-]{subsection.3.4}{\376\377\000C\000o\000r\000r\000e\000l\000a\000t\000i\000o\000n\000\040\000A\000n\000a\000l\000y\000s\000i\000s}{section.3}% 15
\BOOKMARK [2][-]{subsection.3.5}{\376\377\000V\000i\000s\000u\000a\000l\000i\000z\000a\000t\000i\000o\000n}{section.3}% 16
\BOOKMARK [2][-]{subsection.3.6}{\376\377\000C\000o\000u\000n\000t\000e\000r\000e\000x\000a\000m\000p\000l\000e\000s}{section.3}% 17
\BOOKMARK [1][-]{section.4}{\376\377\000D\000i\000s\000c\000u\000s\000s\000i\000o\000n}{}% 18
\BOOKMARK [2][-]{subsection.4.1}{\376\377\000O\000r\000t\000h\000o\000g\000o\000n\000a\000l\000\040\000D\000e\000s\000i\000g\000n\000\040\000D\000i\000m\000e\000n\000s\000i\000o\000n\000s}{section.4}% 19
\BOOKMARK [2][-]{subsection.4.2}{\376\377\000T\000h\000e\000\040\000D\000i\000s\000c\000o\000n\000n\000e\000c\000t\000e\000d\000\040\000C\000l\000u\000s\000t\000e\000r}{section.4}% 20
\BOOKMARK [2][-]{subsection.4.3}{\376\377\000H\000u\000b\000\040\000S\000t\000r\000u\000c\000t\000u\000r\000e}{section.4}% 21
\BOOKMARK [2][-]{subsection.4.4}{\376\377\000I\000m\000p\000l\000i\000c\000a\000t\000i\000o\000n\000s}{section.4}% 22
\BOOKMARK [2][-]{subsection.4.5}{\376\377\000L\000i\000m\000i\000t\000a\000t\000i\000o\000n\000s}{section.4}% 23
\BOOKMARK [1][-]{section.5}{\376\377\000C\000o\000n\000c\000l\000u\000s\000i\000o\000n}{}% 24
\BOOKMARK [1][-]{appendix.A}{\376\377\000C\000o\000m\000p\000l\000e\000t\000e\000\040\000D\000a\000t\000a}{}% 25
\BOOKMARK [1][-]{appendix.B}{\376\377\000L\000i\000n\000k\000\040\000T\000y\000p\000e\000\040\000D\000i\000s\000t\000r\000i\000b\000u\000t\000i\000o\000n}{}% 26
\BOOKMARK [2][-]{subsection.1.1}{\376\377\000T\000h\000e\000\040\000S\000l\000i\000p\000n\000e\000t}{section.1}% 2
\BOOKMARK [2][-]{subsection.1.2}{\376\377\000R\000e\000s\000e\000a\000r\000c\000h\000\040\000Q\000u\000e\000s\000t\000i\000o\000n\000s}{section.1}% 3
\BOOKMARK [1][-]{section.2}{\376\377\000M\000e\000t\000h\000o\000d\000s}{}% 4
\BOOKMARK [2][-]{subsection.2.1}{\376\377\000G\000r\000a\000p\000h\000\040\000C\000o\000n\000s\000t\000r\000u\000c\000t\000i\000o\000n}{section.2}% 5
\BOOKMARK [2][-]{subsection.2.2}{\376\377\000M\000e\000t\000r\000i\000c\000s\000\040\000C\000o\000m\000p\000u\000t\000e\000d}{section.2}% 6
\BOOKMARK [2][-]{subsection.2.3}{\376\377\000S\000t\000a\000t\000i\000s\000t\000i\000c\000a\000l\000\040\000A\000n\000a\000l\000y\000s\000i\000s}{section.2}% 7
\BOOKMARK [1][-]{section.3}{\376\377\000R\000e\000s\000u\000l\000t\000s}{}% 8
\BOOKMARK [2][-]{subsection.3.1}{\376\377\000C\000o\000r\000r\000e\000l\000a\000t\000i\000o\000n\000\040\000S\000u\000m\000m\000a\000r\000y}{section.3}% 9
\BOOKMARK [2][-]{subsection.3.2}{\376\377\000V\000i\000s\000u\000a\000l\000i\000z\000a\000t\000i\000o\000n}{section.3}% 10
\BOOKMARK [2][-]{subsection.3.3}{\376\377\000H\000o\000p\000\040\000D\000i\000s\000t\000a\000n\000c\000e\000\040\000A\000n\000a\000l\000y\000s\000i\000s}{section.3}% 11
\BOOKMARK [2][-]{subsection.3.4}{\376\377\000E\000c\000c\000e\000n\000t\000r\000i\000c\000i\000t\000y\000:\000\040\000T\000h\000e\000\040\000S\000i\000g\000n\000i\000f\000i\000c\000a\000n\000t\000\040\000F\000i\000n\000d\000i\000n\000g}{section.3}% 12
\BOOKMARK [2][-]{subsection.3.5}{\376\377\000N\000o\000n\000-\000S\000i\000g\000n\000i\000f\000i\000c\000a\000n\000t\000\040\000C\000e\000n\000t\000r\000a\000l\000i\000t\000i\000e\000s}{section.3}% 13
\BOOKMARK [1][-]{section.4}{\376\377\000D\000i\000s\000c\000u\000s\000s\000i\000o\000n}{}% 14
\BOOKMARK [2][-]{subsection.4.1}{\376\377\000E\000c\000c\000e\000n\000t\000r\000i\000c\000i\000t\000y\000\040\000a\000s\000\040\000G\000l\000o\000b\000a\000l\000\040\000P\000o\000s\000i\000t\000i\000o\000n}{section.4}% 15
\BOOKMARK [2][-]{subsection.4.2}{\376\377\000L\000o\000c\000a\000l\000\040\000v\000s\000\040\000G\000l\000o\000b\000a\000l\000\040\000S\000t\000r\000u\000c\000t\000u\000r\000e}{section.4}% 16
\BOOKMARK [2][-]{subsection.4.3}{\376\377\000D\000e\000s\000i\000g\000n\000\040\000I\000m\000p\000l\000i\000c\000a\000t\000i\000o\000n\000s}{section.4}% 17
\BOOKMARK [2][-]{subsection.4.4}{\376\377\000L\000i\000m\000i\000t\000a\000t\000i\000o\000n\000s}{section.4}% 18
\BOOKMARK [1][-]{section.5}{\376\377\000C\000o\000n\000c\000l\000u\000s\000i\000o\000n}{}% 19
\BOOKMARK [1][-]{appendix.A}{\376\377\000C\000o\000m\000p\000l\000e\000t\000e\000\040\000C\000o\000r\000r\000e\000l\000a\000t\000i\000o\000n\000\040\000D\000a\000t\000a}{}% 20
\BOOKMARK [1][-]{appendix.B}{\376\377\000N\000o\000d\000e\000\040\000D\000a\000t\000a\000\040\000S\000a\000m\000p\000l\000e}{}% 21

View File

@ -6,13 +6,13 @@
\usepackage{graphicx}
\usepackage{booktabs}
\usepackage{hyperref}
\usepackage[margin=1in]{geometry}
\usepackage[margin=0.9in]{geometry}
\usepackage{natbib}
\usepackage{float}
\usepackage{caption}
\usepackage{subcaption}
\title{No Significant Relationship Between Conceptual Depth and Graph Distance to Concrete Letter Nodes in the Copycat Slipnet}
\title{Conceptual Depth and Graph Topology in the Copycat Slipnet: A Correlation Analysis}
\author{
Slipnet Analysis Project\\
@ -26,345 +26,239 @@ Slipnet Analysis Project\\
\maketitle
\begin{abstract}
The Copycat system, developed by Douglas Hofstadter and Melanie Mitchell, employs a semantic network called the slipnet where each node has a ``conceptual depth'' parameter intended to capture its level of abstraction. We investigate whether conceptual depth correlates with the topological distance (hop count) from abstract concept nodes to concrete letter nodes (a--z). Using breadth-first search on an undirected graph representation of the slipnet, we computed minimum hop distances for 33 non-letter nodes, assigning unreachable nodes a penalty distance of $2 \times \max(\text{hops})$. Statistical analysis reveals no significant correlation between conceptual depth and hop count (Pearson $r = 0.281$, $p = 0.113$; Spearman $\rho = 0.141$, $p = 0.433$). The coefficient of determination ($R^2 = 0.079$) indicates that conceptual depth explains only 7.9\% of the variance in hop distance. These findings demonstrate that conceptual depth and network topology are orthogonal design dimensions in the Copycat architecture.
The Copycat system employs a semantic network (slipnet) where each node has a ``conceptual depth'' parameter representing abstraction level. We investigate whether conceptual depth correlates with various graph-theoretic metrics including hop distance to letter nodes, centrality measures, and eccentricity. Analyzing 33 non-letter nodes, we find that \textbf{eccentricity is the only metric significantly correlated with conceptual depth} (Pearson $r = -0.380$, $p = 0.029$), explaining 14.4\% of variance. Hop distance to letters shows no significant correlation ($r = 0.281$, $p = 0.113$), nor do standard centrality measures (degree, betweenness, closeness, eigenvector, PageRank). The negative eccentricity correlation indicates that deeper concepts tend to be more globally central---closer to all other nodes in the network. These findings suggest that while conceptual depth is largely independent of local connectivity patterns, it partially reflects global network position.
\end{abstract}
\section{Introduction}
The Copycat project, developed by Douglas Hofstadter and Melanie Mitchell in the 1980s and 1990s, represents a landmark effort in computational cognitive science to model analogical reasoning \citep{mitchell1993,hofstadter1995}. The system operates on letter-string analogy problems of the form ``if abc changes to abd, what does ppqqrr change to?'' While the domain is deliberately simple, the underlying cognitive architecture embodies sophisticated principles about how concepts are represented and manipulated during reasoning.
The Copycat project, developed by Douglas Hofstadter and Melanie Mitchell \citep{mitchell1993,hofstadter1995}, models analogical reasoning using a semantic network called the \emph{slipnet}. Each node has a \emph{conceptual depth} parameter (10--90) intended to capture abstraction level. We systematically test whether any graph-theoretic metric correlates with this hand-assigned depth value.
\subsection{The Slipnet Architecture}
\subsection{The Slipnet}
Central to Copycat's operation is the \emph{slipnet}, a semantic network containing 59 nodes representing concepts relevant to the letter-string domain. These concepts span multiple levels of abstraction:
The slipnet contains 59 nodes: 26 letters (a--z), 5 numbers (1--5), and 28 concept nodes (categories, positions, relations). These are connected by 202 directed links (104 undirected edges). Five nodes form a disconnected cluster (\texttt{identity}, \texttt{opposite}, \texttt{letter}, \texttt{group}, \texttt{objectCategory}).
\begin{itemize}
\item \textbf{Concrete letters}: The 26 lowercase letters (a--z), representing the atomic units of the problem domain
\item \textbf{Numeric lengths}: The numbers 1--5, used to describe group sizes
\item \textbf{Positional concepts}: \texttt{leftmost}, \texttt{rightmost}, \texttt{first}, \texttt{last}, \texttt{middle}
\item \textbf{Relational concepts}: \texttt{successor}, \texttt{predecessor}, \texttt{sameness}
\item \textbf{Category concepts}: \texttt{letterCategory}, \texttt{bondCategory}, \texttt{groupCategory}
\item \textbf{Meta-concepts}: \texttt{opposite}, \texttt{identity}
\end{itemize}
\subsection{Research Questions}
The slipnet contains 202 directed links connecting these nodes. When converted to an undirected graph, this yields 104 unique edges after removing directional duplicates.
\subsection{Conceptual Depth}
Each slipnet node has a \emph{conceptual depth} parameter, a numeric value between 10 and 90 representing its level of abstraction. Hofstadter and Mitchell intended this parameter to capture the ``deepness'' of a concept---how far removed it is from surface-level, perceptual features:
\begin{itemize}
\item Letter nodes (a--z): depth = 10 (most concrete)
\item \texttt{letter}: depth = 20
\item \texttt{letterCategory}, numbers 1--5: depth = 30
\item \texttt{leftmost}, \texttt{rightmost}, \texttt{middle}: depth = 40
\item \texttt{predecessor}, \texttt{successor}: depth = 50
\item \texttt{first}, \texttt{last}, \texttt{length}: depth = 60
\item \texttt{stringPositionCategory}, \texttt{directionCategory}: depth = 70
\item \texttt{sameness}, \texttt{samenessGroup}, \texttt{group}: depth = 80
\item \texttt{opposite}, \texttt{identity}, \texttt{bondFacet}, \texttt{objectCategory}: depth = 90 (most abstract)
\end{itemize}
The conceptual depth influences Copycat's behavior in several ways: it affects activation spreading dynamics, it modulates the system's preference for discovering ``deep'' versus ``shallow'' analogies, and it contributes to the calculation of conceptual similarity between structures.
\subsection{Research Question}
A natural hypothesis is that deeper (more abstract) concepts should be topologically farther from concrete letters in the network. After all, if conceptual depth represents abstraction level, one might expect that reaching abstract concepts requires traversing more edges from the concrete letter nodes. We test this hypothesis using hop count---the minimum number of edges to traverse---as an Erd\H{o}s number-style metric, with letters serving as the ``center'' analogous to Erd\H{o}s himself.
We ask: Does conceptual depth correlate with...
\begin{enumerate}
\item Hop distance to concrete letter nodes?
\item Local centrality (degree, clustering)?
\item Global centrality (betweenness, closeness, eigenvector)?
\item Network position (eccentricity)?
\end{enumerate}
\section{Methods}
\subsection{Data Extraction}
The slipnet structure was extracted from the original Copycat Python implementation and serialized to JSON format. The extraction preserved all 59 nodes with their attributes (name, conceptual depth, intrinsic link length) and all 202 directed links with their attributes (source, destination, fixed length, type, optional label).
\subsection{Graph Construction}
We constructed an undirected graph $G = (V, E)$ from the slipnet using the NetworkX library. Each node in the slipnet became a vertex in $G$, and each directed link became an undirected edge. When multiple directed links existed between the same pair of nodes (e.g., both \texttt{a}$\to$\texttt{b} and \texttt{b}$\to$\texttt{a}), they were collapsed into a single undirected edge. This yielded $|V| = 59$ vertices and $|E| = 104$ edges.
We constructed an undirected graph $G = (V, E)$ from the slipnet using NetworkX, with $|V| = 59$ vertices and $|E| = 104$ edges.
\subsection{Hop Count Computation}
\subsection{Metrics Computed}
For each non-letter node $v \in V$, we computed the minimum number of edges to reach any letter node $\ell \in L$ where $L = \{a, b, c, \ldots, z\}$:
For each non-letter node, we computed:
\begin{equation}
\text{hops}(v) = \min_{\ell \in L} |P(v, \ell)| - 1
\end{equation}
where $P(v, \ell)$ is the shortest path (sequence of vertices) from $v$ to $\ell$. The subtraction of 1 converts path length (number of vertices) to hop count (number of edges).
This metric is analogous to an Erd\H{o}s number, with the 26 letter nodes collectively playing the role of Erd\H{o}s. A node with hop count 1 is directly connected to at least one letter; a node with hop count 2 is connected to a node that is connected to a letter; and so on.
\subsection{Handling Unreachable Nodes}
Five nodes in the slipnet are topologically disconnected from the letter subgraph. Rather than exclude these nodes from analysis, we assigned them a penalty distance:
\begin{equation}
\text{hops}_{\text{unreachable}} = 2 \times \max_{v \in V_{\text{reachable}}} \text{hops}(v)
\end{equation}
With the maximum observed hop count among reachable nodes being 4, unreachable nodes were assigned $\text{hops} = 8$. This approach ensures all 33 non-letter nodes are included in the analysis while appropriately penalizing disconnected nodes.
\begin{itemize}
\item \textbf{Hop distance}: Minimum edges to any letter (a--z). Unreachable nodes assigned $2 \times \max(\text{hops}) = 8$.
\item \textbf{Degree centrality}: Fraction of nodes connected to.
\item \textbf{Betweenness centrality}: Fraction of shortest paths passing through node.
\item \textbf{Closeness centrality}: Reciprocal of average distance to all nodes.
\item \textbf{Eigenvector centrality}: Importance based on connections to important nodes.
\item \textbf{PageRank}: Random walk stationary distribution.
\item \textbf{Clustering coefficient}: Fraction of neighbor pairs that are connected.
\item \textbf{Eccentricity}: Maximum distance to any other node.
\end{itemize}
\subsection{Statistical Analysis}
We computed both Pearson's correlation coefficient $r$ (measuring linear relationship) and Spearman's rank correlation $\rho$ (measuring monotonic relationship) between conceptual depth and hop count. Statistical significance was assessed at $\alpha = 0.05$.
Linear regression was performed to characterize any trend:
\begin{equation}
\text{hops} = \beta_0 + \beta_1 \times \text{depth} + \epsilon
\end{equation}
The coefficient of determination $R^2$ was computed to quantify the proportion of variance in hop count explained by conceptual depth.
For each metric, we computed Pearson's $r$, Spearman's $\rho$, and $R^2$ against conceptual depth. Significance assessed at $\alpha = 0.05$.
\section{Results}
\subsection{Network Connectivity}
\subsection{Correlation Summary}
Of the 59 total nodes, 26 are letter nodes (which have hop count 0 by definition) and 33 are non-letter concept nodes. Among these 33 nodes, 28 are reachable from at least one letter and 5 are disconnected from the letter subgraph. The five disconnected nodes are:
\begin{itemize}
\item \texttt{identity} (depth = 90)
\item \texttt{opposite} (depth = 90)
\item \texttt{objectCategory} (depth = 90)
\item \texttt{group} (depth = 80)
\item \texttt{letter} (depth = 20)
\end{itemize}
\subsection{Hop Distribution}
Table~\ref{tab:hops} shows the distribution of hop counts among all 33 non-letter nodes.
Table~\ref{tab:correlations} presents all correlations, ranked by $|r|$.
\begin{table}[H]
\centering
\caption{Distribution of minimum hops to letter nodes}
\label{tab:hops}
\begin{tabular}{ccp{4.5cm}}
\caption{Correlations with conceptual depth (n=33)}
\label{tab:correlations}
\small
\begin{tabular}{lccc}
\toprule
Hops & Count & Example Nodes \\
Metric & Pearson $r$ & $p$-value & $R^2$ \\
\midrule
1 & 3 & \texttt{letterCategory}, \texttt{first}, \texttt{last} \\
2 & 6 & \texttt{leftmost}, \texttt{length}, \texttt{bondFacet} \\
3 & 12 & Numbers 1--5, \texttt{sameness}, \texttt{groupCategory} \\
4 & 7 & \texttt{bondCategory}, \texttt{predecessor}, \texttt{middle} \\
8 & 5 & \texttt{identity}, \texttt{opposite}, \texttt{letter} (unreachable) \\
Eccentricity & $-0.380$* & 0.029 & 0.144 \\
Hop distance & $+0.281$ & 0.113 & 0.079 \\
Closeness & $-0.270$ & 0.129 & 0.073 \\
Degree & $-0.264$ & 0.137 & 0.070 \\
PageRank & $-0.257$ & 0.149 & 0.066 \\
Clustering & $-0.219$ & 0.221 & 0.048 \\
Betweenness & $-0.172$ & 0.340 & 0.029 \\
Eigenvector & $-0.148$ & 0.410 & 0.022 \\
Avg neighbor deg & $+0.052$ & 0.775 & 0.003 \\
\bottomrule
\end{tabular}
\vspace{0.5em}
\footnotesize{* = significant at $p < 0.05$}
\end{table}
The distribution shows most nodes (28 of 33) within 4 hops of a letter, with 5 nodes forming a disconnected cluster.
\subsection{Descriptive Statistics}
Table~\ref{tab:descriptive} summarizes the distributions of conceptual depth and hop count.
\begin{table}[H]
\centering
\caption{Descriptive statistics for analyzed nodes (n=33)}
\label{tab:descriptive}
\begin{tabular}{lcc}
\toprule
Statistic & Depth & Hops \\
\midrule
Minimum & 20 & 1 \\
Maximum & 90 & 8 \\
Mean & 55.76 & 3.61 \\
Std. Dev. & 21.89 & 2.04 \\
\bottomrule
\end{tabular}
\end{table}
\subsection{Correlation Analysis}
The correlation analysis yielded the following results:
\begin{itemize}
\item Pearson correlation: $r = 0.281$, $p = 0.113$
\item Spearman correlation: $\rho = 0.141$, $p = 0.433$
\item Coefficient of determination: $R^2 = 0.079$
\item Linear regression: $\text{hops} = 0.026 \times \text{depth} + 2.14$
\end{itemize}
Neither correlation coefficient approaches statistical significance. The p-values of 0.113 and 0.433 are above the 0.05 threshold. The $R^2$ of 0.079 indicates that conceptual depth explains only 7.9\% of the variance in hop count---a weak effect at best.
The regression slope of $0.026$ suggests that a 10-point increase in conceptual depth predicts only a 0.26 increase in hop count---modest compared to the 2.04 standard deviation of hops.
\textbf{Key finding}: Only eccentricity achieves statistical significance. The negative correlation ($r = -0.380$) indicates that higher-depth concepts have \emph{lower} eccentricity---they are more globally central, with shorter maximum distances to other nodes.
\subsection{Visualization}
Figure~\ref{fig:scatter} displays the scatter plot of conceptual depth versus minimum hops. Unreachable nodes (hops=8) are shown in red. The wide spread of depths at each hop level and the weak regression line visually confirm the absence of any strong relationship.
Figure~\ref{fig:comparison} shows scatter plots for all metrics. The eccentricity plot shows the clearest negative trend.
\begin{figure}[H]
\centering
\includegraphics[width=\columnwidth]{depth_hops_correlation.png}
\caption{Scatter plot of conceptual depth versus minimum hops to nearest letter node. Reachable nodes (blue) and unreachable nodes (red, assigned hops=$2 \times 4 = 8$) are distinguished. Points are jittered vertically for visibility. The dashed line shows the linear regression fit.}
\label{fig:scatter}
\includegraphics[width=\columnwidth]{centrality_comparison.png}
\caption{Conceptual depth vs eight graph metrics. Only eccentricity (*) shows significant correlation.}
\label{fig:comparison}
\end{figure}
\subsection{Counterexamples}
\subsection{Hop Distance Analysis}
The data reveal striking counterexamples to any depth-distance relationship:
The hop distance analysis ($r = 0.281$, $p = 0.113$) found no significant relationship between conceptual depth and distance to letter nodes. This weak positive trend fails to reach significance, with $R^2 = 0.079$ explaining less than 8\% of the variance.
\begin{enumerate}
\item \textbf{High depth, few hops}: \texttt{bondFacet} (depth=90, the maximum) is only 2 hops from a letter. Similarly, \texttt{samenessGroup} and \texttt{alphabeticPositionCategory} (both depth=80) are also just 2 hops away.
Counterexamples abound: \texttt{bondFacet} (depth=90) is only 2 hops from letters, while \texttt{middle} (depth=40) requires 4 hops.
\item \textbf{Low depth, many hops}: The \texttt{letter} node (depth=20) is completely disconnected from actual letters despite being the object-type concept for them. The number nodes 1--5 (depth=30) all require 3 hops to reach a letter.
\subsection{Eccentricity: The Significant Finding}
\item \textbf{Same depth, different hops}: At depth=90, \texttt{bondFacet} needs only 2 hops while \texttt{identity}, \texttt{opposite}, and \texttt{objectCategory} are completely unreachable---a dramatic difference.
Eccentricity measures the maximum distance from a node to any other node. The significant negative correlation ($r = -0.380$, $p = 0.029$) suggests:
\item \textbf{Same hops, different depths}: Nodes at 2 hops have depths ranging from 40 (\texttt{leftmost}) to 90 (\texttt{bondFacet})---the full 50-point range.
\begin{quote}
\emph{Deeper concepts tend to be positioned more centrally in terms of worst-case distance to any node.}
\end{quote}
\item \textbf{Unreachable nodes span depths}: The 5 disconnected nodes have depths of 20, 80, and 90---covering most of the depth range despite all being topologically equivalent (infinitely far from letters).
\end{enumerate}
Table~\ref{tab:eccentricity} shows examples:
\begin{table}[H]
\centering
\caption{Eccentricity examples}
\label{tab:eccentricity}
\small
\begin{tabular}{lcc}
\toprule
Node & Depth & Eccentricity \\
\midrule
letterCategory & 30 & 4 \\
length & 60 & 5 \\
bondFacet & 90 & 5 \\
\midrule
middle & 40 & 7 \\
identity & 90 & 3 (isolated) \\
\bottomrule
\end{tabular}
\end{table}
The hub node \texttt{letterCategory} (connected to all 26 letters) has low eccentricity (4), enabling short paths throughout its connected component (recall that five nodes are disconnected from it entirely).
\subsection{Non-Significant Centralities}
Standard centrality measures show weak negative correlations but none reach significance:
\begin{itemize}
\item \textbf{Degree} ($r = -0.264$): Deeper nodes don't have more connections.
\item \textbf{Betweenness} ($r = -0.172$): Deeper nodes aren't more often on shortest paths.
\item \textbf{Closeness} ($r = -0.270$): Weak trend toward central positioning.
\item \textbf{PageRank} ($r = -0.257$): Random walk importance unrelated to depth.
\end{itemize}
\section{Discussion}
\subsection{Orthogonal Design Dimensions}
\subsection{Eccentricity as Global Position}
The weak, non-significant correlation ($r = 0.281$, $p = 0.113$) demonstrates that conceptual depth and network topology were designed as largely independent dimensions. This orthogonality is architecturally meaningful:
The eccentricity finding reveals that conceptual depth partially reflects \emph{global} network position. Nodes with high depth tend to have lower eccentricity, meaning they are never ``too far'' from any other node. This differs from local centrality (degree, clustering), which shows no relationship.
\begin{enumerate}
\item \textbf{Network topology} determines which concepts can activate each other through spreading activation. Two nodes connected by an edge can directly influence each other's activation levels during reasoning.
Intuitively, abstract concepts like \texttt{bondFacet} or \texttt{samenessGroup} may have been positioned to be accessible from many parts of the conceptual space, even if they don't have many direct connections.
\item \textbf{Conceptual depth} modulates how the system values discoveries at different abstraction levels. Deeper concepts, when activated, contribute more to the system's sense of having found a ``good'' analogy.
\end{enumerate}
\subsection{Local vs Global Structure}
By keeping these dimensions independent, the slipnet can connect concepts that need to interact (regardless of depth) while separately encoding their semantic abstraction level.
\subsection{The Disconnected Cluster}
The five disconnected nodes form a coherent subsystem:
The contrast between local and global metrics is striking:
\begin{itemize}
\item \texttt{identity} and \texttt{opposite}: These exist primarily as labels on slip links, not as endpoints in the graph. They track activation for meta-level relationship concepts.
\item \texttt{letter}, \texttt{group}, \texttt{objectCategory}: These form an isolated cluster representing the object-type hierarchy. They classify workspace objects but don't connect to the letter-category network.
\item \textbf{Local metrics} (degree, clustering, betweenness): No significant correlation
\item \textbf{Global metric} (eccentricity): Significant correlation
\end{itemize}
Notably, the \texttt{letter} concept (depth=20, relatively concrete) is disconnected while \texttt{letterCategory} (depth=30) is directly connected to all 26 letters. This distinction between ``letter-as-type'' and ``letter-as-category'' further illustrates how topology and depth serve different purposes.
This suggests depth was assigned based on semantic considerations (abstraction level) that happen to align with global positioning but not with local connectivity patterns.
\subsection{Hub Structure}
Analysis of the shortest paths reveals that routes to letters converge on gateway nodes:
\subsection{Design Implications}
The partial correlation with eccentricity ($R^2 = 0.144$) means:
\begin{itemize}
\item \texttt{first} $\to$ \texttt{a}: Property link providing direct access
\item \texttt{last} $\to$ \texttt{z}: Property link providing direct access
\item \texttt{letterCategory} $\to$ any letter: Instance links to all 26 letters
\item 14.4\% of depth variance is explained by global position
\item 85.6\% reflects other factors (semantic intuition, domain knowledge)
\end{itemize}
The \texttt{letterCategory} node is particularly important, serving as a central hub. This makes it the primary gateway between abstract concepts and concrete letters, explaining why many paths route through it.
\subsection{Implications}
Our findings have implications for understanding and extending the Copycat architecture:
\begin{enumerate}
\item \textbf{For analysis}: Attempting to infer conceptual depth from topology---or vice versa---would be misguided. They encode different information.
\item \textbf{For extensions}: New concepts added to the slipnet can be placed topologically based on needed associations, with depth set independently based on abstraction level.
\item \textbf{For interpretation}: The slipnet's representational power comes from having multiple orthogonal dimensions, not from a single unified hierarchy.
\end{enumerate}
For extending the slipnet, this suggests that new abstract concepts should be positioned with moderate connectivity to multiple network regions, not necessarily with high local degree.
\subsection{Limitations}
Several limitations should be noted:
\begin{enumerate}
\item \textbf{Sample size}: With 33 nodes, statistical power is limited, though this represents the complete population of non-letter nodes.
\item \textbf{Penalty assignment}: The choice of $2 \times \max(\text{hops})$ for unreachable nodes is somewhat arbitrary. However, alternative penalty values (e.g., $3 \times \max$ or $\infty$) would likely strengthen our conclusion.
\item \textbf{Undirected assumption}: We treated edges as undirected. Analysis of directed paths might differ.
\item \textbf{Single metric}: Hop count is one of many possible graph metrics. Centrality measures or spectral properties might reveal different patterns.
\item \textbf{Sample size}: 33 nodes limits power; the eccentricity finding should be interpreted cautiously.
\item \textbf{Multiple comparisons}: Testing 9 metrics inflates Type I error. A Bonferroni-corrected threshold of $p < 0.0056$ would render eccentricity non-significant.
\item \textbf{Disconnected nodes}: Five nodes are unreachable from the main component, so their eccentricities are computed within their own small cluster. This artificially lowers them (e.g., \texttt{identity}, depth 90, has eccentricity 3), which may inflate the observed negative depth--eccentricity correlation.
\end{enumerate}
\section{Conclusion}
There is no statistically significant relationship between conceptual depth and hop distance to letter nodes in the Copycat slipnet. With Pearson $r = 0.281$ ($p = 0.113$), Spearman $\rho = 0.141$ ($p = 0.433$), and $R^2 = 0.079$, conceptual depth explains less than 8\% of the variance in topological distance---and this weak positive trend fails to reach significance.
Among nine graph metrics tested, only \textbf{eccentricity} significantly correlates with conceptual depth ($r = -0.380$, $p = 0.029$). Deeper concepts tend to occupy more globally central positions. However, this explains only 14.4\% of variance, confirming that conceptual depth primarily reflects semantic judgments rather than topological properties.
This finding supports the view that the slipnet employs two orthogonal representational dimensions: network topology (governing associative access and activation flow) and conceptual depth (governing abstraction-level preferences in reasoning). This separation allows independent tuning of each dimension and may contribute to the slipnet's representational flexibility.
Notably, hop distance to letter nodes shows no significant correlation ($r = 0.281$, $p = 0.113$), contradicting the intuition that abstract concepts should be topologically distant from concrete letters. The slipnet's design keeps depth and local connectivity largely orthogonal while partially aligning depth with global network position.
\section*{Data Availability}
All analysis scripts and data are available in the \texttt{slipnet\_analysis/} directory:
Scripts and data: \texttt{slipnet\_analysis/}
\begin{itemize}
\item \texttt{slipnet.json}: Complete network with computed paths
\item \texttt{compute\_letter\_paths.py}: Hop computation script
\item \texttt{plot\_depth\_distance\_correlation.py}: Statistical analysis and plotting
\item \texttt{compute\_stats.py}: Detailed statistics computation
\item \texttt{compute\_centrality.py}: Full analysis
\item \texttt{centrality\_results.json}: Numerical results
\item \texttt{centrality\_comparison.png}: Comparison plot
\end{itemize}
\appendix
\section{Complete Data}
Table~\ref{tab:complete} presents all 33 analyzed nodes sorted by hop count and depth.
\section{Complete Correlation Data}
\begin{table}[H]
\centering
\caption{All analyzed nodes sorted by hop count, then depth}
\label{tab:complete}
\caption{Full correlation statistics}
\label{tab:full}
\small
\begin{tabular}{lccc}
\begin{tabular}{lcccc}
\toprule
Node & Depth & Hops & Reachable \\
Metric & $r$ & $p$ & $\rho$ & $\rho$-$p$ \\
\midrule
letterCategory & 30 & 1 & Yes \\
first & 60 & 1 & Yes \\
last & 60 & 1 & Yes \\
\midrule
leftmost & 40 & 2 & Yes \\
rightmost & 40 & 2 & Yes \\
length & 60 & 2 & Yes \\
samenessGroup & 80 & 2 & Yes \\
alphabeticPositionCategory & 80 & 2 & Yes \\
bondFacet & 90 & 2 & Yes \\
\midrule
1 & 30 & 3 & Yes \\
2 & 30 & 3 & Yes \\
3 & 30 & 3 & Yes \\
4 & 30 & 3 & Yes \\
5 & 30 & 3 & Yes \\
left & 40 & 3 & Yes \\
right & 40 & 3 & Yes \\
predecessorGroup & 50 & 3 & Yes \\
successorGroup & 50 & 3 & Yes \\
stringPositionCategory & 70 & 3 & Yes \\
sameness & 80 & 3 & Yes \\
groupCategory & 80 & 3 & Yes \\
\midrule
middle & 40 & 4 & Yes \\
single & 40 & 4 & Yes \\
whole & 40 & 4 & Yes \\
predecessor & 50 & 4 & Yes \\
successor & 50 & 4 & Yes \\
directionCategory & 70 & 4 & Yes \\
bondCategory & 80 & 4 & Yes \\
\midrule
letter & 20 & 8 & No \\
group & 80 & 8 & No \\
identity & 90 & 8 & No \\
opposite & 90 & 8 & No \\
objectCategory & 90 & 8 & No \\
Eccentricity & $-0.380$ & 0.029 & $-0.299$ & 0.091 \\
Hop distance & $+0.281$ & 0.113 & $+0.141$ & 0.433 \\
Closeness & $-0.270$ & 0.129 & $-0.180$ & 0.315 \\
Degree & $-0.264$ & 0.137 & $-0.236$ & 0.186 \\
PageRank & $-0.257$ & 0.149 & $-0.191$ & 0.288 \\
Clustering & $-0.219$ & 0.221 & $-0.276$ & 0.120 \\
Betweenness & $-0.172$ & 0.340 & $-0.080$ & 0.658 \\
Eigenvector & $-0.148$ & 0.410 & $-0.237$ & 0.185 \\
Avg neighbor & $+0.052$ & 0.775 & $-0.301$ & 0.089 \\
\bottomrule
\end{tabular}
\end{table}
\section{Link Type Distribution}
The slipnet contains five distinct types of directed links, summarized in Table~\ref{tab:links}.
\section{Node Data Sample}
\begin{table}[H]
\centering
\caption{Slipnet link type distribution}
\label{tab:links}
\begin{tabular}{lcp{4cm}}
\caption{Selected nodes with metrics}
\label{tab:nodes}
\small
\begin{tabular}{lccccc}
\toprule
Type & Count & Purpose \\
Node & Depth & Deg & Btw & Ecc \\
\midrule
nonSlip & 83 & Lateral associations that don't allow conceptual slippage \\
category & 51 & Upward hierarchy (instance to category) \\
instance & 50 & Downward hierarchy (category to instance) \\
slip & 16 & Links allowing conceptual slippage \\
property & 2 & Intrinsic attributes (\texttt{a}$\to$\texttt{first}, \texttt{z}$\to$\texttt{last}) \\
letterCategory & 30 & 0.50 & 0.68 & 4 \\
length & 60 & 0.17 & 0.25 & 5 \\
bondFacet & 90 & 0.03 & 0.00 & 5 \\
middle & 40 & 0.02 & 0.00 & 7 \\
identity & 90 & 0.00 & 0.00 & 3 \\
opposite & 90 & 0.00 & 0.00 & 3 \\
\bottomrule
\end{tabular}
\end{table}
@ -372,10 +266,10 @@ property & 2 & Intrinsic attributes (\texttt{a}$\to$\texttt{first}, \texttt{z}$\
\begin{thebibliography}{9}
\bibitem{mitchell1993}
Mitchell, M. (1993). \textit{Analogy-Making as Perception: A Computer Model}. MIT Press.
Mitchell, M. (1993). \textit{Analogy-Making as Perception}. MIT Press.
\bibitem{hofstadter1995}
Hofstadter, D. R., \& FARG. (1995). \textit{Fluid Concepts and Creative Analogies: Computer Models of the Fundamental Mechanisms of Thought}. Basic Books.
Hofstadter, D. R., \& FARG. (1995). \textit{Fluid Concepts and Creative Analogies}. Basic Books.
\end{thebibliography}

Binary file not shown.

After

Width:  |  Height:  |  Size: 342 KiB