December 21, 2025
45 min read
AI Research Scientist Interview Questions
interview
career-advice
job-search

Milad Bonakdar
Author
Prepare for common AI research scientist interview questions covering deep learning, Transformers, experimental design, model evaluation, and research communication.
## Introduction
AI research scientist interviews test whether you can think like a researcher: propose hypotheses, explain design trade-offs, implement core ideas, compare models fairly, and articulate those trade-offs clearly in paper discussions or research talks. Beyond deep learning and Transformers, be ready for experimental design, reproducibility, safety, and next-step research directions.
Use this guide to practice technically accurate, clearly communicated answers. Strong candidates connect formulas and code to research judgment: why a method should work, how to validate it, which failure modes matter, and how to express uncertainty.
## Deep Learning Theory (5 Questions)
### 1. Explain backpropagation and the chain rule in detail.
**Answer:**
Backpropagation uses the chain rule to compute gradients efficiently.
- **Chain rule:** for composed functions, the derivative is the product of the per-layer derivatives (checked numerically in the sketch below)
- **Forward pass:** compute outputs and cache intermediate values
- **Backward pass:** propagate gradients from the output back to the input
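Interviewers often like to see the chain rule verified numerically. Here is a minimal sketch (not part of the original answer) comparing an analytic chain-rule gradient against a central finite-difference estimate:

```python
import numpy as np

def sigmoid(z):
    return 1 / (1 + np.exp(-z))

# f(w) = sigmoid(w * x); the chain rule gives f'(w) = sigmoid'(w * x) * x
x, w = 1.5, 0.3
s = sigmoid(w * x)
analytic = s * (1 - s) * x
# Central finite difference as an independent check
eps = 1e-6
numeric = (sigmoid((w + eps) * x) - sigmoid((w - eps) * x)) / (2 * eps)
print(f"analytic={analytic:.8f}, numeric={numeric:.8f}")  # should agree to ~1e-10
```

The same check scales to whole networks and is a standard way to validate a hand-written backward pass like the one below.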
```python
import numpy as np

# A small neural network to demonstrate backpropagation
class SimpleNN:
    def __init__(self, input_size, hidden_size, output_size):
        # Initialize weights
        self.W1 = np.random.randn(input_size, hidden_size) * 0.01
        self.b1 = np.zeros((1, hidden_size))
        self.W2 = np.random.randn(hidden_size, output_size) * 0.01
        self.b2 = np.zeros((1, output_size))

    def sigmoid(self, x):
        return 1 / (1 + np.exp(-x))

    def sigmoid_derivative(self, x):
        # Expects x to already be a sigmoid activation
        return x * (1 - x)

    def forward(self, X):
        # Layer 1
        self.z1 = np.dot(X, self.W1) + self.b1
        self.a1 = self.sigmoid(self.z1)
        # Layer 2
        self.z2 = np.dot(self.a1, self.W2) + self.b2
        self.a2 = self.sigmoid(self.z2)
        return self.a2

    def backward(self, X, y, output, learning_rate=0.01):
        m = X.shape[0]
        # Output layer gradients
        # For sigmoid + binary cross-entropy, the chain rule collapses:
        # dL/dz2 = dL/da2 * da2/dz2 = a2 - y
        dz2 = output - y
        dW2 = (1/m) * np.dot(self.a1.T, dz2)
        db2 = (1/m) * np.sum(dz2, axis=0, keepdims=True)
        # Hidden layer gradients (chain rule)
        # dL/da1 = dL/dz2 * dz2/da1 = dz2 * W2.T
        # dL/dz1 = dL/da1 * da1/dz1 = dL/da1 * sigmoid'(z1)
        da1 = np.dot(dz2, self.W2.T)
        dz1 = da1 * self.sigmoid_derivative(self.a1)
        dW1 = (1/m) * np.dot(X.T, dz1)
        db1 = (1/m) * np.sum(dz1, axis=0, keepdims=True)
        # Update weights
        self.W2 -= learning_rate * dW2
        self.b2 -= learning_rate * db2
        self.W1 -= learning_rate * dW1
        self.b1 -= learning_rate * db1

    def train(self, X, y, epochs=1000):
        for epoch in range(epochs):
            # Forward pass
            output = self.forward(X)
            # Backward pass
            self.backward(X, y, output)
            if epoch % 100 == 0:
                loss = -np.mean(y * np.log(output) + (1-y) * np.log(1-output))
                print(f'Epoch {epoch}, Loss: {loss:.4f}')

# Example usage
X = np.array([[0, 0], [0, 1], [1, 0], [1, 1]])
y = np.array([[0], [1], [1], [0]])  # XOR
nn = SimpleNN(input_size=2, hidden_size=4, output_size=1)
nn.train(X, y, epochs=5000)
```
**Rarity:** Very Common
**Difficulty:** Hard
---
### 2. What is the vanishing gradient problem, and how do you solve it?
**Answer:**
Vanishing gradients occur when gradients become extremely small in deep networks.
- **Causes:**
  - Sigmoid/tanh activations (derivatives < 1)
  - Deep networks (gradients are multiplied layer by layer)
- **Solutions:**
  - ReLU activations
  - Batch normalization
  - Residual connections (ResNet)
  - LSTM/GRU for RNNs
  - Careful initialization (Xavier, He; see the sketch right below)
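The main example below covers ReLU, residual connections, and batch normalization; careful initialization is easy to show separately. This is a minimal sketch using PyTorch's built-in initializers (the helper `init_weights` is illustrative, not a torch API):

```python
import torch.nn as nn

def init_weights(module):
    # He (Kaiming) init pairs with ReLU; Xavier (Glorot) pairs with sigmoid/tanh
    if isinstance(module, nn.Linear):
        nn.init.kaiming_normal_(module.weight, nonlinearity='relu')
        # For sigmoid/tanh layers, use: nn.init.xavier_uniform_(module.weight)
        nn.init.zeros_(module.bias)

net = nn.Sequential(nn.Linear(100, 100), nn.ReLU(), nn.Linear(100, 100))
net.apply(init_weights)  # recursively applies init_weights to every submodule
```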
```python
import torch
import torch.nn as nn

# Problem: a deep network with sigmoid activations
class VanishingGradientNet(nn.Module):
    def __init__(self):
        super().__init__()
        self.layers = nn.Sequential(*[
            nn.Sequential(nn.Linear(100, 100), nn.Sigmoid())
            for _ in range(20)  # 20 layers
        ])

    def forward(self, x):
        return self.layers(x)

# Solution 1: ReLU activations
class ReLUNet(nn.Module):
    def __init__(self):
        super().__init__()
        self.layers = nn.Sequential(*[
            nn.Sequential(nn.Linear(100, 100), nn.ReLU())
            for _ in range(20)
        ])

    def forward(self, x):
        return self.layers(x)

# Solution 2: residual connections
class ResidualBlock(nn.Module):
    def __init__(self, dim):
        super().__init__()
        self.layers = nn.Sequential(
            nn.Linear(dim, dim),
            nn.ReLU(),
            nn.Linear(dim, dim)
        )

    def forward(self, x):
        return x + self.layers(x)  # skip connection

class ResNet(nn.Module):
    def __init__(self):
        super().__init__()
        self.blocks = nn.Sequential(*[
            ResidualBlock(100) for _ in range(20)
        ])

    def forward(self, x):
        return self.blocks(x)

# Solution 3: batch normalization
class BatchNormNet(nn.Module):
    def __init__(self):
        super().__init__()
        self.layers = nn.Sequential(*[
            nn.Sequential(
                nn.Linear(100, 100),
                nn.BatchNorm1d(100),
                nn.ReLU()
            )
            for _ in range(20)
        ])

    def forward(self, x):
        return self.layers(x)

# Gradient flow analysis
def analyze_gradients(model, x, y):
    model.zero_grad()
    output = model(x)
    loss = nn.MSELoss()(output, y)
    loss.backward()
    # Inspect gradient magnitudes layer by layer
    for name, param in model.named_parameters():
        if param.grad is not None:
            grad_norm = param.grad.norm().item()
            print(f"{name}: {grad_norm:.6f}")
```
**Rarity:** Very Common
**Difficulty:** Hard
---
### 3. Explain the attention mechanism and self-attention.
**Answer:**
Attention lets a model focus on the relevant parts of its input.
- **Attention:** a weighted sum of values, with weights from query-key similarity
- **Self-attention:** attention where the queries, keys, and values all come from the same source
- **Scaled dot-product attention:** softmax(QK^T / √d_k)V, written out below
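Written out in full, the scaled dot-product attention implemented in the code below is:

$$
\mathrm{Attention}(Q, K, V) = \mathrm{softmax}\!\left(\frac{QK^{\top}}{\sqrt{d_k}}\right)V
$$

Dividing by √d_k keeps the dot-product logits at roughly unit variance as d_k grows, which prevents the softmax from saturating and killing gradients.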
```python
import torch
import torch.nn as nn
import torch.nn.functional as F
import math

class ScaledDotProductAttention(nn.Module):
    def __init__(self, temperature):
        super().__init__()
        self.temperature = temperature

    def forward(self, q, k, v, mask=None):
        """
        q: (batch, seq_len, d_k)
        k: (batch, seq_len, d_k)
        v: (batch, seq_len, d_v)
        (an extra leading heads dimension also works, since all ops broadcast)
        """
        # Compute attention scores
        attn = torch.matmul(q, k.transpose(-2, -1)) / self.temperature
        # Apply the mask (for padding or causal attention)
        if mask is not None:
            attn = attn.masked_fill(mask == 0, -1e9)
        # Softmax to get the attention weights
        attn_weights = F.softmax(attn, dim=-1)
        # Apply attention to the values
        output = torch.matmul(attn_weights, v)
        return output, attn_weights

class MultiHeadAttention(nn.Module):
    def __init__(self, d_model, n_heads, dropout=0.1):
        super().__init__()
        assert d_model % n_heads == 0
        self.d_model = d_model
        self.n_heads = n_heads
        self.d_k = d_model // n_heads
        # Linear projections
        self.w_q = nn.Linear(d_model, d_model)
        self.w_k = nn.Linear(d_model, d_model)
        self.w_v = nn.Linear(d_model, d_model)
        self.w_o = nn.Linear(d_model, d_model)
        self.attention = ScaledDotProductAttention(temperature=math.sqrt(self.d_k))
        self.dropout = nn.Dropout(dropout)

    def forward(self, q, k, v, mask=None):
        batch_size = q.size(0)
        # Linear projections, then split into heads
        q = self.w_q(q).view(batch_size, -1, self.n_heads, self.d_k).transpose(1, 2)
        k = self.w_k(k).view(batch_size, -1, self.n_heads, self.d_k).transpose(1, 2)
        v = self.w_v(v).view(batch_size, -1, self.n_heads, self.d_k).transpose(1, 2)
        # Apply attention
        output, attn_weights = self.attention(q, k, v, mask)
        # Concatenate the heads
        output = output.transpose(1, 2).contiguous().view(batch_size, -1, self.d_model)
        # Final linear projection
        output = self.w_o(output)
        return output, attn_weights

# Example usage
d_model = 512
n_heads = 8
seq_len = 10
batch_size = 2
mha = MultiHeadAttention(d_model, n_heads)
x = torch.randn(batch_size, seq_len, d_model)
# Self-attention (q, k, v all come from x)
output, attn = mha(x, x, x)
print(f"Output shape: {output.shape}")
print(f"Attention weights shape: {attn.shape}")
```
**Rarity:** Very Common
**Difficulty:** Hard
---
### 4. What is the difference between batch normalization and layer normalization?
**Answer:**
Both normalize activations, but along different dimensions.
- **Batch normalization:**
  - Normalizes across the batch dimension
  - Requires batch statistics
  - Struggles with small batches and RNNs
- **Layer normalization:**
  - Normalizes across the feature dimension
  - Independent of batch size
  - Better suited to RNNs and Transformers (formulas below)
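In formulas, for an activation matrix x with batch index i and feature index j (both norms then apply a learned scale γ and shift β):

$$
\text{BatchNorm:}\quad \hat{x}_{ij} = \frac{x_{ij} - \mu_j}{\sqrt{\sigma_j^2 + \epsilon}}
\qquad
\text{LayerNorm:}\quad \hat{x}_{ij} = \frac{x_{ij} - \mu_i}{\sqrt{\sigma_i^2 + \epsilon}}
$$

Here μ_j and σ_j² are statistics of feature j computed across the batch, while μ_i and σ_i² are statistics of sample i computed across its features.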
```python
import torch
import torch.nn as nn

# Batch normalization
class BatchNormExample(nn.Module):
    def __init__(self, num_features):
        super().__init__()
        self.bn = nn.BatchNorm1d(num_features)

    def forward(self, x):
        # x: (batch_size, num_features)
        # Normalizes each feature across the batch dimension
        return self.bn(x)

# Layer normalization
class LayerNormExample(nn.Module):
    def __init__(self, normalized_shape):
        super().__init__()
        self.ln = nn.LayerNorm(normalized_shape)

    def forward(self, x):
        # x: (batch_size, seq_len, d_model)
        # Normalizes each sample across the feature dimension
        return self.ln(x)

# Manual implementation
class ManualLayerNorm(nn.Module):
    def __init__(self, normalized_shape, eps=1e-5):
        super().__init__()
        self.eps = eps
        self.gamma = nn.Parameter(torch.ones(normalized_shape))
        self.beta = nn.Parameter(torch.zeros(normalized_shape))

    def forward(self, x):
        # Mean and variance over the last dimension
        mean = x.mean(dim=-1, keepdim=True)
        var = x.var(dim=-1, keepdim=True, unbiased=False)
        # Normalize
        x_norm = (x - mean) / torch.sqrt(var + self.eps)
        # Scale and shift
        return self.gamma * x_norm + self.beta

# Comparison
batch_size, seq_len, d_model = 2, 10, 512
# Batch norm (used in CNNs)
x_cnn = torch.randn(batch_size, d_model, 28, 28)
bn = nn.BatchNorm2d(d_model)
out_bn = bn(x_cnn)
# Layer norm (used in Transformers)
x_transformer = torch.randn(batch_size, seq_len, d_model)
ln = nn.LayerNorm(d_model)
out_ln = ln(x_transformer)
print(f"Batch Norm output: {out_bn.shape}")
print(f"Layer Norm output: {out_ln.shape}")
```
**Rarity:** Common
**Difficulty:** Medium
---
### 5. Explain the Transformer architecture in detail.
**Answer:**
The Transformer uses self-attention for sequence modeling, with no recurrence.
- **Components:**
  - **Encoder:** self-attention + FFN
  - **Decoder:** masked self-attention + cross-attention + FFN
  - **Positional encoding:** injects position information (see the formulas below)
  - **Multi-head attention:** parallel attention heads
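The sinusoidal positional encoding built by `PositionalEncoding` in the code below is defined as:

$$
PE_{(pos,\,2i)} = \sin\!\left(\frac{pos}{10000^{2i/d_{\text{model}}}}\right)
\qquad
PE_{(pos,\,2i+1)} = \cos\!\left(\frac{pos}{10000^{2i/d_{\text{model}}}}\right)
$$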
```python
import torch
import torch.nn as nn
import math

class PositionalEncoding(nn.Module):
    def __init__(self, d_model, max_len=5000):
        super().__init__()
        # Build the positional encoding matrix
        pe = torch.zeros(max_len, d_model)
        position = torch.arange(0, max_len, dtype=torch.float).unsqueeze(1)
        div_term = torch.exp(torch.arange(0, d_model, 2).float() *
                             (-math.log(10000.0) / d_model))
        pe[:, 0::2] = torch.sin(position * div_term)
        pe[:, 1::2] = torch.cos(position * div_term)
        pe = pe.unsqueeze(0)
        self.register_buffer('pe', pe)

    def forward(self, x):
        return x + self.pe[:, :x.size(1)]

class TransformerEncoderLayer(nn.Module):
    def __init__(self, d_model, n_heads, d_ff, dropout=0.1):
        super().__init__()
        # Multi-head attention (batch_first=True so inputs are (batch, seq, d_model))
        self.self_attn = nn.MultiheadAttention(d_model, n_heads, dropout=dropout,
                                               batch_first=True)
        # Feed-forward network
        self.ffn = nn.Sequential(
            nn.Linear(d_model, d_ff),
            nn.ReLU(),
            nn.Dropout(dropout),
            nn.Linear(d_ff, d_model)
        )
        # Layer normalization
        self.norm1 = nn.LayerNorm(d_model)
        self.norm2 = nn.LayerNorm(d_model)
        self.dropout = nn.Dropout(dropout)

    def forward(self, x, mask=None):
        # Self-attention with a residual connection
        attn_output, _ = self.self_attn(x, x, x, attn_mask=mask)
        x = self.norm1(x + self.dropout(attn_output))
        # Feed-forward network with a residual connection
        ffn_output = self.ffn(x)
        x = self.norm2(x + self.dropout(ffn_output))
        return x

class TransformerEncoder(nn.Module):
    def __init__(self, vocab_size, d_model, n_heads, d_ff, n_layers, dropout=0.1):
        super().__init__()
        self.embedding = nn.Embedding(vocab_size, d_model)
        self.pos_encoding = PositionalEncoding(d_model)
        self.layers = nn.ModuleList([
            TransformerEncoderLayer(d_model, n_heads, d_ff, dropout)
            for _ in range(n_layers)
        ])
        self.dropout = nn.Dropout(dropout)

    def forward(self, x, mask=None):
        # Embedding + positional encoding
        x = self.embedding(x) * math.sqrt(self.embedding.embedding_dim)
        x = self.pos_encoding(x)
        x = self.dropout(x)
        # Apply the encoder layers
        for layer in self.layers:
            x = layer(x, mask)
        return x

# Example usage
vocab_size = 10000
d_model = 512
n_heads = 8
d_ff = 2048
n_layers = 6
encoder = TransformerEncoder(vocab_size, d_model, n_heads, d_ff, n_layers)
# Input: (batch_size, seq_len)
x = torch.randint(0, vocab_size, (2, 10))
output = encoder(x)
print(f"Output shape: {output.shape}")  # (2, 10, 512)
```
**Rarity:** Very Common
**Difficulty:** Hard
---
## Research Methodology (4 Questions)
### 6. How do you formulate a research question and hypothesis?
**Answer:**
Research starts with identifying a gap and formulating a testable hypothesis.
- **Steps:**
  - **Literature review:** understand the state of the art
  - **Identify the gap:** what is missing, or what could be improved?
  - **Formulate a hypothesis:** a specific, testable claim
  - **Design experiments:** how will the hypothesis be tested?
  - **Define metrics:** how will success be measured?
- **Example** (captured as a structured plan in the sketch below)**:**
  - **Gap:** current models struggle with long-range dependencies
  - **Hypothesis:** sparse attention can preserve performance while reducing complexity
  - **Experiment:** compare sparse attention against full attention on long sequences
  - **Metrics:** perplexity, accuracy, inference time
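One practical habit is to pin such a plan down as structured, version-controllable data before running anything. This is an illustrative sketch (not a standard tool) encoding the example above:

```python
from dataclasses import dataclass, field

@dataclass
class ResearchPlan:
    gap: str
    hypothesis: str
    experiment: str
    metrics: list = field(default_factory=list)

plan = ResearchPlan(
    gap="Current models struggle with long-range dependencies",
    hypothesis="Sparse attention preserves performance while reducing complexity",
    experiment="Compare sparse vs. full attention on long sequences",
    metrics=["perplexity", "accuracy", "inference time"],
)
print(plan)
```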
**Rarity:** Very Common
**Difficulty:** Medium
---
### 7. How do you design an ablation study?
**Answer:**
An ablation study isolates the contribution of each component.
- **Purpose:** understand what makes the model work
- **Method:** remove or modify one component at a time
- **Best practices:**
  - Control all other variables
  - Use the same random seeds
  - Report confidence intervals
  - Test on multiple datasets
```python
import torch.nn as nn
import pandas as pd

# Example ablation study.
# AttentionLayer, FFNLayer, ResidualWrapper, and train_and_evaluate are
# project-specific placeholders assumed to be defined elsewhere.
class ModelWithAblations:
    def __init__(self, use_attention=True, use_residual=True, use_dropout=True):
        self.use_attention = use_attention
        self.use_residual = use_residual
        self.use_dropout = use_dropout

    def build_model(self):
        layers = []
        if self.use_attention:
            layers.append(AttentionLayer())
        layers.append(FFNLayer())
        if self.use_dropout:
            layers.append(nn.Dropout(0.1))
        if self.use_residual:
            return ResidualWrapper(nn.Sequential(*layers))
        else:
            return nn.Sequential(*layers)

# Run the ablation experiments
configs = [
    {'use_attention': True, 'use_residual': True, 'use_dropout': True},   # full model
    {'use_attention': False, 'use_residual': True, 'use_dropout': True},  # no attention
    {'use_attention': True, 'use_residual': False, 'use_dropout': True},  # no residual
    {'use_attention': True, 'use_residual': True, 'use_dropout': False},  # no dropout
]
results = []
for config in configs:
    model = ModelWithAblations(**config)
    accuracy = train_and_evaluate(model, seed=42)
    results.append({**config, 'accuracy': accuracy})

# Analyze the results
df = pd.DataFrame(results)
print(df)
```
**Rarity:** Very Common
**Difficulty:** Medium
---
### 8. How do you ensure research reproducibility?
**Answer:**
Reproducibility is essential for scientific validity.
- **Best practices:**
  - **Code:** version control, clear documentation
  - **Data:** versioning, documented preprocessing
  - **Environment:** Docker, requirements.txt
  - **Seeds:** fix all random seeds
  - **Hyperparameters:** record every setting
  - **Hardware:** record GPU/CPU specs
```python
import random
import os
import sys
import json
import datetime

import numpy as np
import torch

def set_all_seeds(seed=42):
    """Set every seed for reproducibility."""
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)
    os.environ['PYTHONHASHSEED'] = str(seed)
    # Deterministic operations
    torch.backends.cudnn.deterministic = True
    torch.backends.cudnn.benchmark = False

# Log everything
def log_experiment(config, results):
    experiment_log = {
        'timestamp': datetime.datetime.now().isoformat(),
        'config': config,
        'results': results,
        'environment': {
            'python_version': sys.version,
            'torch_version': torch.__version__,
            'cuda_version': torch.version.cuda,
            'gpu': torch.cuda.get_device_name(0) if torch.cuda.is_available() else 'CPU'
        }
    }
    with open('experiment_log.json', 'w') as f:
        json.dump(experiment_log, f, indent=2)

# Share code and models
"""
# README.md
## Reproducibility

### Environment
    conda create -n research python=3.9
    conda activate research
    pip install -r requirements.txt

### Data
Downloaded from: [link]
Preprocessing: python preprocess.py

### Training
    python train.py --config configs/experiment1.yaml --seed 42

### Evaluation
    python evaluate.py --checkpoint checkpoints/best_model.pt
"""
```
**Rarity:** Very Common
**Difficulty:** Easy
---
### 9. How do you evaluate and compare models fairly?
**Answer:**
Fair comparison requires careful experimental design.
- **Considerations:**
  - **Identical data splits:** use the same train/validation/test sets
  - **Multiple runs:** report mean and standard deviation
  - **Statistical tests:** t-test, Wilcoxon test
  - **Compute cost:** FLOPs, parameters, runtime
  - **Multiple metrics:** do not cherry-pick
  - **Multiple datasets:** check generalization
```python
import numpy as np
from scipy import stats

# ModelA, ModelB, the data splits, and set_all_seeds (from the
# reproducibility question above) are assumed to be defined elsewhere.
class ModelComparison:
    def __init__(self, n_runs=5):
        self.n_runs = n_runs
        self.results = {}

    def evaluate_model(self, model_name, model_fn, X_train, y_train, X_test, y_test):
        scores = []
        for seed in range(self.n_runs):
            # Set the seed for this run
            set_all_seeds(seed)
            # Train the model
            model = model_fn()
            model.fit(X_train, y_train)
            # Evaluate
            score = model.score(X_test, y_test)
            scores.append(score)
        self.results[model_name] = {
            'scores': scores,
            'mean': np.mean(scores),
            'std': np.std(scores),
            'ci_95': stats.t.interval(
                0.95, len(scores)-1,
                loc=np.mean(scores),
                scale=stats.sem(scores)
            )
        }

    def compare_models(self, model_a, model_b):
        """Statistical significance test."""
        scores_a = self.results[model_a]['scores']
        scores_b = self.results[model_b]['scores']
        # Paired t-test
        statistic, p_value = stats.ttest_rel(scores_a, scores_b)
        return {
            'statistic': statistic,
            'p_value': p_value,
            'significant': p_value < 0.05,
            'better_model': model_a if np.mean(scores_a) > np.mean(scores_b) else model_b
        }

    def report(self):
        for model_name, result in self.results.items():
            print(f"\n{model_name}:")
            print(f"  Mean: {result['mean']:.4f}")
            print(f"  Std: {result['std']:.4f}")
            print(f"  95% CI: [{result['ci_95'][0]:.4f}, {result['ci_95'][1]:.4f}]")

# Usage
comparison = ModelComparison(n_runs=10)
comparison.evaluate_model('Model A', lambda: ModelA(), X_train, y_train, X_test, y_test)
comparison.evaluate_model('Model B', lambda: ModelB(), X_train, y_train, X_test, y_test)
comparison.report()
result = comparison.compare_models('Model A', 'Model B')
print(f"\nStatistical test: p-value = {result['p_value']:.4f}")
```
**Rarity:** Very Common
**Difficulty:** Medium
---
## Advanced Topics (4 Questions)
### 10. Explain contrastive learning and its applications.
**Answer:**
Contrastive learning learns representations by comparing similar and dissimilar samples.
- **Key idea:** pull similar samples closer together, push dissimilar samples apart
- **Loss functions:** InfoNCE, NT-Xent (equation below)
- **Applications:** SimCLR, MoCo, CLIP
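For a positive pair (z_i, z_j) among 2N augmented samples, the NT-Xent / InfoNCE loss computed by the code below is:

$$
\ell_{i,j} = -\log \frac{\exp\left(\mathrm{sim}(z_i, z_j)/\tau\right)}{\sum_{k=1,\,k\neq i}^{2N} \exp\left(\mathrm{sim}(z_i, z_k)/\tau\right)}
$$

where sim is cosine similarity and τ is the temperature.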
```python
import torch
import torch.nn as nn
import torch.nn.functional as F

class ContrastiveLoss(nn.Module):
    def __init__(self, temperature=0.5):
        super().__init__()
        self.temperature = temperature

    def forward(self, features):
        """
        features: (2*batch_size, dim), pairs of augmented samples
        """
        batch_size = features.shape[0] // 2
        # Normalize the features
        features = F.normalize(features, dim=1)
        # Similarity matrix
        similarity_matrix = torch.matmul(features, features.T)
        # Labels (positive pairs): sample i matches sample i + batch_size
        labels = torch.cat([torch.arange(batch_size) + batch_size,
                            torch.arange(batch_size)]).to(features.device)
        # Mask out self-similarity
        mask = torch.eye(2 * batch_size, dtype=torch.bool).to(features.device)
        similarity_matrix = similarity_matrix.masked_fill(mask, -9e15)
        # Compute the loss
        similarity_matrix = similarity_matrix / self.temperature
        loss = F.cross_entropy(similarity_matrix, labels)
        return loss

class SimCLR(nn.Module):
    def __init__(self, encoder, projection_dim=128):
        super().__init__()
        self.encoder = encoder
        self.projection = nn.Sequential(
            nn.Linear(encoder.output_dim, 512),
            nn.ReLU(),
            nn.Linear(512, projection_dim)
        )

    def forward(self, x1, x2):
        # Encode both augmented views
        h1 = self.encoder(x1)
        h2 = self.encoder(x2)
        # Project into the contrastive space
        z1 = self.projection(h1)
        z2 = self.projection(h2)
        # Concatenate for the contrastive loss
        features = torch.cat([z1, z2], dim=0)
        return features

# Training loop (encoder, dataloader, and augment are assumed defined elsewhere)
model = SimCLR(encoder, projection_dim=128)
criterion = ContrastiveLoss(temperature=0.5)
optimizer = torch.optim.Adam(model.parameters())
for epoch in range(100):
    for batch in dataloader:
        # Get two augmented views
        x1, x2 = augment(batch)
        # Forward pass
        features = model(x1, x2)
        loss = criterion(features)
        # Backward pass
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
```
**Rarity:** Common
**Difficulty:** Hard
---
### 11. What are Vision Transformers (ViT), and how do they work?
**Answer:**
Vision Transformers apply the Transformer architecture to images.
- **Key ideas:**
  - Split the image into patches (patch count worked out below)
  - Linearly embed each patch
  - Add positional embeddings
  - Apply a Transformer encoder
- **Advantages:** scalability, global receptive field
- **Challenge:** requires large datasets
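As a quick sanity check on the patch arithmetic (a tiny illustrative snippet matching the defaults in the code below): a 224×224 image with 16×16 patches gives (224/16)² = 196 patches, plus one class token = 197 input tokens.

```python
img_size, patch_size = 224, 16
n_patches = (img_size // patch_size) ** 2
print(n_patches)      # 196 patches per image
print(n_patches + 1)  # 197 tokens after prepending the [CLS] token
```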
```python
import torch
import torch.nn as nn

class PatchEmbedding(nn.Module):
    def __init__(self, img_size=224, patch_size=16, in_channels=3, embed_dim=768):
        super().__init__()
        self.img_size = img_size
        self.patch_size = patch_size
        self.n_patches = (img_size // patch_size) ** 2
        # A strided convolution extracts and embeds the patches in one step
        self.projection = nn.Conv2d(
            in_channels, embed_dim,
            kernel_size=patch_size,
            stride=patch_size
        )

    def forward(self, x):
        # x: (batch, channels, height, width)
        x = self.projection(x)  # (batch, embed_dim, n_patches_h, n_patches_w)
        x = x.flatten(2)        # (batch, embed_dim, n_patches)
        x = x.transpose(1, 2)   # (batch, n_patches, embed_dim)
        return x

class VisionTransformer(nn.Module):
    def __init__(self, img_size=224, patch_size=16, in_channels=3,
                 embed_dim=768, n_heads=12, n_layers=12, num_classes=1000):
        super().__init__()
        # Patch embedding
        self.patch_embed = PatchEmbedding(img_size, patch_size, in_channels, embed_dim)
        n_patches = self.patch_embed.n_patches
        # Class token
        self.cls_token = nn.Parameter(torch.zeros(1, 1, embed_dim))
        # Positional embedding
        self.pos_embed = nn.Parameter(torch.zeros(1, n_patches + 1, embed_dim))
        # Transformer encoder
        encoder_layer = nn.TransformerEncoderLayer(
            d_model=embed_dim,
            nhead=n_heads,
            dim_feedforward=4*embed_dim,
```