I am working with a large dataset in the tsai library. The arrays fit in CPU memory, but I do not have enough GPU memory to convert the whole dataset to a tensor at once.
My code is below. Can somebody please explain how I can use a dataloader to feed this data in batches while still training with GPU acceleration? After the code I have sketched what I think the dataloader approach might look like, but I am not sure it is correct.
import os
os.chdir(os.path.dirname(os.path.abspath(__file__)))
import numpy as np
import multiprocessing
from tsai.basics import *
from tsai.all import *
from fastai.metrics import Recall, Precision
from fastai.callback.tracker import SaveModelCallback, EarlyStoppingCallback
from fastai.losses import FocalLoss
def train(dataset_idx):
    print("starting process:", dataset_idx)
    # Load the pre-split arrays; transpose to tsai's expected shape
    # (samples, variables, timesteps)
    X_train = np.load(str(dataset_idx) + '_X_train.npy').transpose((0, 2, 1))
    y_train = np.load(str(dataset_idx) + '_y_train.npy').astype(int)
    X_test = np.load(str(dataset_idx) + '_X_test.npy').transpose((0, 2, 1))
    y_test = np.load(str(dataset_idx) + '_y_test.npy').astype(int)
    n_train = X_train.shape[0]
    print("data loaded")
    # Stack train and test into a single array; the splits below preserve the
    # original train/validation boundary
    X_train = np.concatenate([X_train, X_test], axis=0)
    y_train = np.concatenate([y_train, y_test], axis=0)
    del X_test, y_test
    splits = (list(range(n_train)), list(range(n_train, X_train.shape[0])))
    print("dataset generated")
    tfms = [None, TSClassification()]
    batch_tfms = TSStandardize()
    precision = Precision()
    recall = Recall()
    save_callback = SaveModelCallback(monitor='valid_loss', comp=None,
                                      fname='sample_best_model', every_epoch=False,
                                      at_end=False, with_opt=False, reset_on_fit=True)
    early_stopping = EarlyStoppingCallback(monitor='valid_loss', patience=10)
    clf = TSClassifier(X_train, y_train, splits=splits, arch="InceptionTimePlus",
                       loss_func=FocalLoss(), tfms=tfms, batch_tfms=batch_tfms,
                       bs=[2048], metrics=[precision, recall],
                       cbs=[save_callback, early_stopping])
    clf.fit_one_cycle(10, 2.5e-4)
    clf.export("sample_trained_model.pkl")
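For reference, this is the direction I think I need, based on my reading of the tsai docs: build the dataloaders explicitly so the arrays stay in CPU RAM and only one batch at a time is moved to the GPU. This is an untested sketch using get_ts_dls and ts_learner (both should already be available through the tsai.all import above); please correct me if this is not how they are meant to be used:

# Untested sketch, replacing the TSClassifier(...) call above.
# Assumption: get_ts_dls keeps X_train/y_train on the CPU, and the
# dataloader transfers each batch to the GPU as it is drawn, so the
# full dataset is never converted to a single GPU tensor.
dls = get_ts_dls(X_train, y_train, splits=splits,
                 tfms=tfms, batch_tfms=batch_tfms, bs=2048)
learn = ts_learner(dls, InceptionTimePlus, loss_func=FocalLoss(),
                   metrics=[precision, recall],
                   cbs=[save_callback, early_stopping])
learn.fit_one_cycle(10, 2.5e-4)
learn.export("sample_trained_model.pkl")

If the arrays ever stopped fitting in CPU RAM as well, I understand np.load(..., mmap_mode='r') could keep them memory-mapped on disk, but in my case RAM is not the bottleneck, only GPU memory.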