@@ -124,6 +124,116 @@ def extract_symbol_code(target_str):
     return None
 
 
+DETAIL_KEYS = ['prev_close', 'open', 'MA5', 'MA10', 'MA20', 'MA30', 'MA60', 'AVG5']
+
+
+def parse_details(details_str):
+    """
+    Split the details column into separate fields
+    """
+    if pd.isna(details_str):
+        return {}
+    result = {}
+    for part in str(details_str).split('|'):
+        if ':' not in part:
+            continue
+        key, value = part.split(':', 1)
+        key = key.strip()
+        raw_value = value.strip()
+        is_percent = False
+        if raw_value.endswith('%'):
+            is_percent = True
+            raw_value = raw_value[:-1]
+        try:
+            numeric_value = float(raw_value)
+            if is_percent:
+                numeric_value /= 100
+            result[key] = numeric_value
+        except (ValueError, TypeError):
+            result[key] = np.nan
+    return result
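+# Example: a details cell such as "prev_close:3500|open:3510.5|MA60:3480|AVG5:1.2%"
+# parses to {'prev_close': 3500.0, 'open': 3510.5, 'MA60': 3480.0, 'AVG5': 0.012};
+# values that cannot be converted to float are stored as np.nan.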
+
+
+def infer_trade_direction(trade_type):
+    """
+    Infer the direction (long/short) from the trade type
+    """
+    if not isinstance(trade_type, str):
+        return 'unknown'
+    lowered = trade_type.lower()
+    if '多' in trade_type or 'long' in lowered:
+        return 'long'
+    if '空' in trade_type or 'short' in lowered:
+        return 'short'
+    return 'unknown'
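+# Example: '开多' or 'Long breakout' -> 'long'; '平空' or 'short scalp' -> 'short';
+# non-string values (e.g. NaN) and unrecognised types -> 'unknown'.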
+
+
+def evaluate_trend_alignment(row):
+    """
+    Judge from MA60, the fill price and the open whether the trade direction agrees with the trend
+    """
+    direction = infer_trade_direction(row.get('交易类型'))
+    ma60 = row.get('MA60')
+    trade_price = row.get('成交价')
+    open_price = row.get('open')
+    if direction == 'unknown' or pd.isna(ma60) or pd.isna(trade_price) or pd.isna(open_price):
+        return '数据不足'
+    if direction == 'long':
+        return '一致' if trade_price >= ma60 and open_price >= ma60 else '不一致'
+    if direction == 'short':
+        return '一致' if trade_price <= ma60 and open_price <= ma60 else '不一致'
+    return '数据不足'
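+# Example: a long trade with MA60=3480, open=3510 and fill price 3520 is labelled '一致'
+# (both prices sit above MA60); if either price fell below MA60 the label would be '不一致'.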
+
+
+def calculate_ma_compaction(row, ma_columns):
+    """
+    Measure how tightly a set of moving averages cluster: standard deviation / mean
+    """
+    values = [row.get(col) for col in ma_columns]
+    if any(pd.isna(v) for v in values):
+        return np.nan
+    mean_val = np.mean(values)
+    if mean_val == 0:
+        return np.nan
+    std_val = np.std(values, ddof=0)
+    return std_val / mean_val
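+# Example: MA5/MA10/MA20/MA30 of 100, 101, 102, 103 give mean 101.5 and population
+# std ~1.118, so the compaction value is ~0.011; tighter clustering means a smaller value.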
+
+
+def calculate_gap_ratio(prev_close, open_price):
+    """
+    Compute the opening gap ratio
+    """
+    if pd.isna(prev_close) or prev_close == 0 or pd.isna(open_price):
+        return np.nan
+    return abs(open_price - prev_close) / abs(prev_close)
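+# Example: prev_close=3500 and open=3521 give |3521 - 3500| / 3500 = 0.006, i.e. a 0.6% gap.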
+
+
+def calculate_relative_gap(prev_close, open_price, avg_5day_change):
+    """
+    Compute the relative gap: gap ratio / avg_5day_change
+    """
+    base_gap = calculate_gap_ratio(prev_close, open_price)
+    if pd.isna(base_gap) or pd.isna(avg_5day_change) or avg_5day_change == 0:
+        return np.nan
+    return base_gap / abs(avg_5day_change)
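+# Example: a 0.6% gap with an average 5-day change of 1.2% (0.012) gives 0.006 / 0.012 = 0.5,
+# i.e. the opening gap is half the size of the recent 5-day average change.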
+
+
+def assign_quantile_labels(df, source_col, target_col, q=4):
+    """
+    Bucket a continuous metric into quantile labels
+    """
+    labels = [f'Q{i+1}' for i in range(q)]
+    valid = df[source_col].dropna()
+    unique_count = valid.nunique()
+    if valid.empty or unique_count < 2:
+        df[target_col] = np.nan
+        return
+    bins = min(q, unique_count)
+    # qcut can drop duplicate bin edges, so label the surviving categories afterwards;
+    # passing labels= together with duplicates='drop' raises when fewer bins remain.
+    quantiles = pd.qcut(valid, q=bins, duplicates='drop')
+    quantiles = quantiles.cat.rename_categories(labels[:len(quantiles.cat.categories)])
+    df[target_col] = np.nan
+    df.loc[quantiles.index, target_col] = quantiles.astype(str)
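+# Example: with q=4 and enough distinct values, rows receive 'Q1' (lowest quartile)
+# through 'Q4' (highest); rows whose source value is NaN keep NaN in the target column.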
+
+
 def calculate_time_segment(order_time_str, trading_start_time_str):
     """
     计算开仓时间相对于交易开始时间的时间段
@@ -189,6 +299,32 @@ def load_and_preprocess_data(csv_path):
 
     print(f"原始数据行数: {len(df)}")
     print(f"数据列: {df.columns.tolist()}")
+
+    # Parse the details column and back-fill the fields needed below
+    if 'details' in df.columns:
+        details_df = df['details'].apply(parse_details).apply(pd.Series)
+        for key in DETAIL_KEYS:
+            if key in details_df.columns:
+                df[key] = details_df[key]
+            elif key not in df.columns:
+                df[key] = np.nan
+    else:
+        for key in DETAIL_KEYS:
+            if key not in df.columns:
+                df[key] = np.nan
+
+    if '成交价' not in df.columns:
+        df['成交价'] = np.nan
+
+    avg5_col = 'avg_5day_change'
+    if avg5_col not in df.columns:
+        if 'AVG5' in df.columns:
+            df[avg5_col] = df['AVG5']
+        else:
+            df[avg5_col] = np.nan
+    else:
+        if 'AVG5' in df.columns:
+            df[avg5_col] = df[avg5_col].fillna(df['AVG5'])
 
     # 提取品种代码
     df['品种代码'] = df['标的'].apply(extract_symbol_code)
@@ -229,6 +365,26 @@
     df['成交额分组'] = pd.cut(df['成交额'],
                              bins=[0, 100000, 200000, 500000, float('inf')],
                              labels=['<10万', '10-20万', '20-50万', '>50万'])
+
+    # Trend alignment and derived indicators
+    df['趋势一致'] = df.apply(evaluate_trend_alignment, axis=1)
+    df['均线聚合度_5_10_20_30'] = df.apply(
+        lambda row: calculate_ma_compaction(row, ['MA5', 'MA10', 'MA20', 'MA30']), axis=1
+    )
+    df['均线聚合度_5_10_20'] = df.apply(
+        lambda row: calculate_ma_compaction(row, ['MA5', 'MA10', 'MA20']), axis=1
+    )
+    assign_quantile_labels(df, '均线聚合度_5_10_20_30', '均线聚合度_5_10_20_30_分位')
+    assign_quantile_labels(df, '均线聚合度_5_10_20', '均线聚合度_5_10_20_分位')
+
+    df['跳空比率'] = df.apply(
+        lambda row: calculate_gap_ratio(row.get('prev_close'), row.get('open')), axis=1
+    )
+    df['跳空相对波动'] = df.apply(
+        lambda row: calculate_relative_gap(row.get('prev_close'), row.get('open'), row.get('avg_5day_change')),
+        axis=1
+    )
+    assign_quantile_labels(df, '跳空相对波动', '跳空相对波动分位')
 
     print(f"预处理后数据行数: {len(df)}")
     print(f"品种代码提取成功率: {df['品种代码'].notna().sum() / len(df) * 100:.2f}%")
@@ -453,6 +609,123 @@ def analyze_additional_dimensions(df):
     }
 
 
+def analyze_trend_alignment(df):
+    """
+    Trend-alignment analysis
+    """
+    print("\n" + "="*80)
+    print("趋势一致性分析(基于MA60 vs 成交价/open)")
+    print("="*80)
+
+    if df['趋势一致'].dropna().empty:
+        print("暂无可用数据")
+        return None
+
+    trend_stats = df.groupby('趋势一致').apply(calculate_statistics).round(4)
+    print(trend_stats.to_string())
+
+    return trend_stats
+
+
+def analyze_ma_compaction(df):
+    """
+    Moving-average compaction analysis
+    """
+    print("\n" + "="*80)
+    print("均线聚合度分析(标准差/均值)")
+    print("="*80)
+
+    compaction_results = {}
+    config = [
+        ('均线聚合度_5_10_20_30', 'MA5/MA10/MA20/MA30', '均线聚合度_5_10_20_30_分位', 'ma_compaction_ma5_ma30'),
+        ('均线聚合度_5_10_20', 'MA5/MA10/MA20', '均线聚合度_5_10_20_分位', 'ma_compaction_ma5_ma20')
+    ]
+
+    for col, label, quantile_col, result_key in config:
+        print(f"\n--- {label} ---")
+        if col not in df.columns or df[col].dropna().empty:
+            print("数据不足,无法分析。")
+            compaction_results[result_key] = None
+            continue
+
+        print(f"{label} 描述统计:均值={df[col].mean():.4f}, 中位数={df[col].median():.4f}, 最大值={df[col].max():.4f}")
+        if quantile_col not in df.columns or df[quantile_col].dropna().empty:
+            print("分位标签缺失,跳过统计。")
+            compaction_results[result_key] = None
+            continue
+
+        stats = df.groupby(quantile_col).apply(calculate_statistics).round(4)
+        print(stats.to_string())
+        compaction_results[result_key] = stats
+
+    return compaction_results
+
+
+def analyze_gap_behavior(df):
+    """
+    Opening-gap behavior analysis
+    """
+    print("\n" + "="*80)
+    print("跳空行为分析")
+    print("="*80)
+
+    if '跳空比率' not in df.columns or df['跳空比率'].dropna().empty:
+        print("缺少跳空数据,无法分析。")
+        return None
+
+    print(f"跳空比率描述:均值={df['跳空比率'].mean():.4f}, 最大值={df['跳空比率'].max():.4f}")
+    if '跳空相对波动' in df.columns and not df['跳空相对波动'].dropna().empty:
+        print(f"跳空相对波动描述:均值={df['跳空相对波动'].mean():.4f}, 最大值={df['跳空相对波动'].max():.4f}")
+
+    if '跳空相对波动分位' not in df.columns or df['跳空相对波动分位'].dropna().empty:
+        print("跳空相对波动分位标签缺失,跳过分组统计。")
+        return None
+
+    gap_stats = df.groupby('跳空相对波动分位').apply(calculate_statistics).round(4)
+    print("\n按跳空相对波动分位的表现:")
+    print(gap_stats.to_string())
+
+    return gap_stats
+
+
+def analyze_enhanced_cross_metrics(df):
+    """
+    Cross-tabulate the new indicators against the core dimensions (MA combination, time segment after the open)
+    """
+    print("\n" + "="*80)
+    print("扩展指标交叉分析(趋势一致/均线聚合度/跳空 vs 核心维度)")
+    print("="*80)
+
+    config = [
+        (['crossed_ma_lines', '趋势一致'], '趋势一致 × 均线组合', 'trend_vs_ma'),
+        (['开盘后时间段', '趋势一致'], '趋势一致 × 开盘后时间段', 'trend_vs_time'),
+        (['crossed_ma_lines', '均线聚合度_5_10_20_30_分位'], '均线聚合度(4) × 均线组合', 'ma_compact4_vs_ma'),
+        (['开盘后时间段', '均线聚合度_5_10_20_30_分位'], '均线聚合度(4) × 开盘后时间段', 'ma_compact4_vs_time'),
+        (['crossed_ma_lines', '均线聚合度_5_10_20_分位'], '均线聚合度(3) × 均线组合', 'ma_compact3_vs_ma'),
+        (['开盘后时间段', '均线聚合度_5_10_20_分位'], '均线聚合度(3) × 开盘后时间段', 'ma_compact3_vs_time'),
+        (['crossed_ma_lines', '跳空相对波动分位'], '跳空相对波动 × 均线组合', 'gap_vs_ma'),
+        (['开盘后时间段', '跳空相对波动分位'], '跳空相对波动 × 开盘后时间段', 'gap_vs_time'),
+    ]
+
+    results = {}
+    for group_cols, title, key in config:
+        missing_cols = [col for col in group_cols if col not in df.columns]
+        if missing_cols:
+            print(f"\n{title}: 缺少列 {missing_cols},跳过。")
+            results[key] = None
+            continue
+        if df[group_cols[1]].dropna().empty:
+            print(f"\n{title}: 数据不足,跳过。")
+            results[key] = None
+            continue
+        stats = df.groupby(group_cols).apply(calculate_statistics).round(4)
+        print(f"\n{title}")
+        print(stats.to_string())
+        results[key] = stats
+
+    return results
+
+
 def create_visualizations(df, ma_stats, time_stats, cross_winrate, cross_profit, cross_return, output_dir):
     """
     创建数据可视化图表
@@ -664,11 +937,97 @@ def create_visualizations(df, ma_stats, time_stats, cross_winrate, cross_profit,
     plt.savefig(os.path.join(output_dir, 'variety_analysis.png'), dpi=150, bbox_inches='tight')
     print(f"已保存: {os.path.join(output_dir, 'variety_analysis.png')}")
     plt.close()
+
+    # 5. Heatmaps for the extended indicators
+    def build_metric_pivot(source_df, row_field, col_field, value_field, row_order=None, col_order=None):
+        filtered = source_df.dropna(subset=[col_field])
+        if row_order is not None:
+            filtered = filtered[filtered[row_field].isin(row_order)]
+        pivot = pd.pivot_table(
+            filtered,
+            index=row_field,
+            columns=col_field,
+            values=value_field,
+            aggfunc='mean'
+        )
+        if row_order is not None:
+            pivot = pivot.reindex([idx for idx in row_order if idx in pivot.index])
+        if col_order is not None:
+            pivot = pivot[[col for col in col_order if col in pivot.columns]]
+        return pivot
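+    # Example: build_metric_pivot(df, 'crossed_ma_lines', '趋势一致', '是否盈利') yields a table
+    # of mean 是否盈利 (win rate) with one row per MA combination and one column per alignment label.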
+
+    def plot_heatmap(ax, data, title, fmt='.2f', center=None, vmin=None, vmax=None, cbar_label=''):
+        if data.empty:
+            ax.axis('off')
+            ax.set_title(f"{title}(数据不足)")
+            return
+        sns.heatmap(
+            data,
+            annot=True,
+            fmt=fmt,
+            cmap='RdYlGn',
+            center=center,
+            vmin=vmin,
+            vmax=vmax,
+            ax=ax,
+            cbar_kws={'label': cbar_label}
+        )
+        ax.set_title(title)
+        ax.set_xlabel(data.columns.name or '')
+        ax.set_ylabel(data.index.name or '')
+
+    enhanced_metric_configs = [
+        ('趋势一致', 'trend_alignment_cross.png', '趋势一致', ['一致', '不一致', '数据不足']),
+        ('均线聚合度_5_10_20_30_分位', 'ma_compaction_4lines_cross.png', '均线聚合度(MA5/MA10/MA20/MA30)', None),
+        ('均线聚合度_5_10_20_分位', 'ma_compaction_3lines_cross.png', '均线聚合度(MA5/MA10/MA20)', None),
+        ('跳空相对波动分位', 'gap_behavior_cross.png', '跳空相对波动', None),
+    ]
+
+    row_configs = [
+        ('crossed_ma_lines', 'Top 15 均线组合', list(ma_stats.head(15).index)),
+        ('开盘后时间段', '开盘后时间段', ['<30分钟', '30-60分钟', '>1小时', '未知']),
+    ]
+
+    value_configs = [
+        ('是否盈利', '胜率', '.2f', 0.5, 0, 1, '胜率'),
+        ('交易盈亏', '平均盈亏', '.0f', 0, None, None, '平均盈亏(元)'),
+        ('保证金收益率', '平均保证金收益率', '.2f', 0, None, None, '平均保证金收益率(%)'),
+    ]
+
+    for metric_field, filename, metric_title, col_order in enhanced_metric_configs:
+        if metric_field not in df.columns or df[metric_field].dropna().empty:
+            continue
+        fig, axes = plt.subplots(len(row_configs), len(value_configs), figsize=(22, 12))
+        for row_idx, (row_field, row_label, row_order) in enumerate(row_configs):
+            for col_idx, (value_field, value_label, fmt, center, vmin, vmax, cbar_label) in enumerate(value_configs):
+                ax = axes[row_idx, col_idx]
+                pivot = build_metric_pivot(df, row_field, metric_field, value_field, row_order=row_order, col_order=col_order)
+                ax.set_title(f"{row_label} - {value_label}")
+                plot_heatmap(
+                    ax,
+                    pivot,
+                    f"{row_label} - {value_label}",
+                    fmt=fmt,
+                    center=center,
+                    vmin=vmin,
+                    vmax=vmax,
+                    cbar_label=cbar_label
+                )
+                ax.set_xlabel(metric_title)
+                ax.set_ylabel(row_label)
+        plt.suptitle(f"{metric_title} × 核心维度表现", fontsize=16)
+        plt.tight_layout(rect=[0, 0, 1, 0.97])
+        output_path = os.path.join(output_dir, filename)
+        plt.savefig(output_path, dpi=150, bbox_inches='tight')
+        print(f"已保存: {output_path}")
+        plt.close()
 
     print("\n所有图表已生成!")
 
 
-def save_results_to_csv(df, ma_stats, time_stats, output_dir):
+def save_results_to_csv(df, ma_stats, time_stats, output_dir,
+                        trend_alignment_stats=None, ma_compaction_stats=None,
+                        gap_stats=None, enhanced_cross_stats=None):
     """
     保存分析结果到CSV
     """
@@ -707,6 +1066,32 @@ def save_results_to_csv(df, ma_stats, time_stats, output_dir):
     combo_stats.to_csv(output_file, encoding='utf-8-sig')
     print(f"已保存组合策略统计: {output_file}")
 
+    if trend_alignment_stats is not None:
+        output_file = os.path.join(output_dir, 'trend_alignment_stats.csv')
+        trend_alignment_stats.to_csv(output_file, encoding='utf-8-sig')
+        print(f"已保存趋势一致性统计: {output_file}")
+
+    if ma_compaction_stats:
+        for key, stats in ma_compaction_stats.items():
+            if stats is None:
+                continue
+            output_file = os.path.join(output_dir, f'{key}_stats.csv')
+            stats.to_csv(output_file, encoding='utf-8-sig')
+            print(f"已保存均线聚合度统计: {output_file}")
+
+    if gap_stats is not None:
+        output_file = os.path.join(output_dir, 'gap_behavior_stats.csv')
+        gap_stats.to_csv(output_file, encoding='utf-8-sig')
+        print(f"已保存跳空行为统计: {output_file}")
+
+    if enhanced_cross_stats:
+        for key, stats in enhanced_cross_stats.items():
+            if stats is None:
+                continue
+            output_file = os.path.join(output_dir, f'{key}_stats.csv')
+            stats.to_csv(output_file, encoding='utf-8-sig')
+            print(f"已保存 {key} 交叉统计: {output_file}")
+
 
 def main():
     """
@@ -735,12 +1120,25 @@ def main():
     cross_count, cross_winrate, cross_profit, cross_return = analyze_cross_dimension(df)
     trade_type_stats, variety_stats, symbol_stats = analyze_trade_type_and_variety(df)
     additional_stats = analyze_additional_dimensions(df)
+    trend_alignment_stats = analyze_trend_alignment(df)
+    ma_compaction_stats = analyze_ma_compaction(df)
+    gap_stats = analyze_gap_behavior(df)
+    enhanced_cross_stats = analyze_enhanced_cross_metrics(df)
 
     # 生成可视化图表
     create_visualizations(df, ma_stats, time_stats, cross_winrate, cross_profit, cross_return, output_dir)
 
     # 保存结果到CSV
-    save_results_to_csv(df, ma_stats, time_stats, output_dir)
+    save_results_to_csv(
+        df,
+        ma_stats,
+        time_stats,
+        output_dir,
+        trend_alignment_stats=trend_alignment_stats,
+        ma_compaction_stats=ma_compaction_stats,
+        gap_stats=gap_stats,
+        enhanced_cross_stats=enhanced_cross_stats
+    )
 
     print("\n" + "="*80)
     print("分析完成!")