def barw(ax):
    # Annotate each bar of a horizontal bar chart with its value.
    for p in ax.patches:
        val = p.get_width()                 # width of the bar, i.e. its value
        x = p.get_x() + p.get_width()       # x-position: right end of the bar
        y = p.get_y() + p.get_height() / 2  # y-position: vertical centre of the bar
        ax.annotate(round(val, 2), (x, y))  # used to label the most frequent leaf classes
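For context, `barw` labels each bar of a horizontal bar chart with its value, e.g. when plotting how many images each leaf class has. A minimal usage sketch, assuming a hypothetical DataFrame `train_df` with a `label` column:

```python
import matplotlib.pyplot as plt
import pandas as pd

# Hypothetical data standing in for the real training CSV: one class name per image.
train_df = pd.DataFrame({"label": ["maple", "maple", "oak", "oak", "oak", "birch"]})

# Horizontal bar chart of images per class, annotated with barw.
ax = train_df["label"].value_counts().plot(kind="barh", figsize=(8, 4))
barw(ax)
plt.show()
```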
RuntimeError: CUDA error: device-side assert triggered
CUDA kernel errors might be asynchronously reported at some other API call, so the stacktrace below might be incorrect.
For debugging consider passing CUDA_LAUNCH_BLOCKING=1.
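The device-side assert above is reported asynchronously, so the stack trace may point at the wrong call. As the message suggests, making kernel launches blocking gives a precise trace; with `nn.CrossEntropyLoss`, this assert is most often caused by a label index outside `[0, num_classes)`, which with 176 classes means a label outside `[0, 175]`. A minimal debugging sketch (the `labels` check is illustrative and assumes a batch of integer class indices):

```python
import os

# Make CUDA kernel launches synchronous so the stack trace points at the
# failing operation. Set this before CUDA is initialised, i.e. before
# importing torch / starting the notebook kernel.
os.environ["CUDA_LAUNCH_BLOCKING"] = "1"

# With nn.CrossEntropyLoss, a device-side assert usually means a label is
# negative or >= num_classes. For the 176-class model used below:
# assert labels.min() >= 0 and labels.max() < 176
```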
import os
import json

import pandas as pd
from PIL import Image
from tqdm import tqdm

import torch
from torch import nn
from torch.utils.data import Dataset, DataLoader
import torchvision.models as models
from torchvision import datasets, transforms
# Optionally freeze the early layers of the model (feature-extraction mode).
def set_parameter_requires_grad(model, feature_extracting):
    if feature_extracting:
        for param in model.parameters():
            param.requires_grad = False

# ResNet-34 model. The body follows the standard torchvision fine-tuning
# recipe: load the pretrained backbone, optionally freeze it, and replace the
# final fully connected layer with one sized for our number of classes.
def res_model(num_classes, feature_extract=False, use_pretrained=True):
    model_ft = models.resnet34(pretrained=use_pretrained)
    set_parameter_requires_grad(model_ft, feature_extract)
    num_ftrs = model_ft.fc.in_features
    model_ft.fc = nn.Linear(num_ftrs, num_classes)
    return model_ft
model = res_model(176)  # final number of classes: 176
model = model.to(device)
# Loss function
criterion = nn.CrossEntropyLoss()
# Optimizer
optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate, weight_decay=weight_decay)
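The names `device`, `learning_rate`, `weight_decay`, `epochs`, and `model_path` are assumed to be defined earlier in the notebook; the sketch below uses placeholder values only, not the settings actually used for training:

```python
import os
import torch

# Placeholder configuration -- hypothetical values, assumed to be defined
# earlier in the notebook rather than in this section.
device = "cuda" if torch.cuda.is_available() else "cpu"
learning_rate = 1e-4
weight_decay = 1e-3
epochs = 20
model_path = "./models"  # directory where checkpoints are written
os.makedirs(model_path, exist_ok=True)
```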
best_acc = 0.0
for epoch in range(epochs):
    # ---------- Training ----------
    model.train()

    # These are used to record information in training.
    train_loss = []
    train_accs = []

    for batch in tqdm(train_loader):
        imgs, labels = batch
        imgs = imgs.to(device)
        labels = labels.to(device)

        logits = model(imgs)
        loss = criterion(logits, labels)

        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        # Compute the accuracy for the current batch.
        acc = (logits.argmax(dim=-1) == labels).float().mean()

        # Record the loss and accuracy.
        train_loss.append(loss.item())
        train_accs.append(acc.item())

    # The average loss and accuracy of the training set is the average of the recorded values.
    train_loss = sum(train_loss) / len(train_loss)
    train_acc = sum(train_accs) / len(train_accs)

    # Print the information.
    print(f"[ Train | {epoch + 1:03d}/{epochs:03d} ] loss = {train_loss:.5f}, acc = {train_acc:.5f}")

    # ---------- Validation ----------
    model.eval()

    # These are used to record information in validation.
    valid_loss = []
    valid_accs = []

    for batch in tqdm(validation_loader):
        imgs, labels = batch
        imgs = imgs.to(device)
        labels = labels.to(device)

        # No gradients are needed during validation.
        with torch.no_grad():
            logits = model(imgs)
            loss = criterion(logits, labels)

        # Compute the accuracy for the current validation batch.
        acc = (logits.argmax(dim=-1) == labels).float().mean()

        # Record the loss and accuracy.
        valid_loss.append(loss.item())
        valid_accs.append(acc.item())

    # The average loss and accuracy for the entire validation set is the average of the recorded values.
    valid_loss = sum(valid_loss) / len(valid_loss)
    valid_acc = sum(valid_accs) / len(valid_accs)

    # Print the information.
    print(f"[ Valid | {epoch + 1:03d}/{epochs:03d} ] loss = {valid_loss:.5f}, acc = {valid_acc:.5f}")

    # If the model improves, save a checkpoint at this epoch.
    if valid_acc > best_acc:
        best_acc = valid_acc
        torch.save(model.state_dict(), os.path.join(model_path, "res_model.ckpt"))
        print('saving model with acc {:.3f}'.format(best_acc))
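After training, the best checkpoint can be restored for inference. A minimal sketch, reusing `res_model`, `device`, and `model_path` from above:

```python
# Rebuild the architecture and load the best weights saved during training.
model = res_model(176)
model.load_state_dict(torch.load(os.path.join(model_path, "res_model.ckpt"), map_location=device))
model = model.to(device)
model.eval()  # switch to evaluation mode before predicting
```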