added classification, results are bad...
64803 toni/octave/features.txt
File diff suppressed because it is too large
@@ -1,8 +1,10 @@
display("functions")
source("settings.m");


function files = getDataFiles(clsName, trainsetPerClass)

setDataDir = "/home/toni/Documents/handygames/HandyGames/daten/";
global setDataDir;

dir = strcat(setDataDir, clsName, "/");
lst = readdir(dir);
@@ -36,13 +38,16 @@ function samples = getSamplesForClass(clsName, trainsetPerClass, start, percent)
display(strcat("training data for '", clsName, "': ", num2str(length(samples)), " samples"));
end

function data = getRawTrainData(trainsetPerClass)
function data = getRawTrainData()

#global trainsetPerClass;

data = {};
data{1}.samples = getSamplesForClass("forwardbend", trainsetPerClass, 10, 0.9);
data{2}.samples = getSamplesForClass("kneebend", trainsetPerClass, 10, 0.9);
data{3}.samples = getSamplesForClass("pushups", trainsetPerClass, 10, 0.9);
data{4}.samples = getSamplesForClass("situps", trainsetPerClass, 10, 0.9);
data{5}.samples = getSamplesForClass("jumpingjack", trainsetPerClass, 10, 0.9);
data{1}.samples = getSamplesForClass("forwardbend", 9, 10, 0.95);
data{2}.samples = getSamplesForClass("kneebend", 11, 10, 0.95);
data{3}.samples = getSamplesForClass("pushups", 11, 10, 0.95);
data{4}.samples = getSamplesForClass("situps", 8, 10, 0.95);
data{5}.samples = getSamplesForClass("jumpingjack", 13, 10, 0.95);
end

function plotData(data,outPath)
@@ -75,17 +80,15 @@ function filteredData = filterData(data)
end

function win = window(rawVec, posMS)

global setWindowSize;
pos = posMS / 10;
#check if out of bounds; if yes, fill with zeros
#the last windows are sometimes filled with a lot of zeros... this could cause problems
if length(rawVec) <= pos+100-1
#win = rawVec(pos-100:length(rawVec),:);
#fillOut = zeros((pos+100-1) - length(rawVec), 3);
#length(fillOut)
#win = [win; fillOut];

#only full windows are accepted.
if length(rawVec) <= pos+(setWindowSize/2)-1
win = [];
else
win = rawVec(pos-100:pos+100-1,:); #100*10 ms is half the window size. that is 2s for each window.
win = rawVec(pos-(setWindowSize/2):pos+(setWindowSize/2)-1,:); #(setWindowSize/2) samples * 10 ms is half the window size.
endif
end
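A minimal usage sketch for window() (assumes functions.m has been sourced so the function and the settings.m globals are available; the random test data below is just a stand-in, not part of the commit):

rawVec = randn(1000, 3);          # stand-in for 10 s of 3-axis sensor readings at 10 ms
win = window(rawVec, 2000);       # window centered at 2 s -> a 256x3 matrix with setWindowSize = 256
tooLate = window(rawVec, 9500);   # fewer than setWindowSize/2 samples left -> returns []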
@@ -100,14 +103,24 @@ function out = getMagnitude(data)
end

function windowedData = windowData(data)
windowedData = {};

global setWindowSize;
global setWindowSliding;
windowedData = {};

for k = 1:numel(data)
for j = 1:numel(data{k}.samples)

winsAccel = {};
winsGyro = {};
winsMagnet = {};
#init
winsAccelX = {};
winsAccelY = {};
winsAccelZ = {};
winsGyroX = {};
winsGyroY = {};
winsGyroZ = {};
winsMagnetX = {};
winsMagnetY = {};
winsMagnetZ = {};

winsAccelPCA = {};
winsGyroPCA = {};
@@ -118,12 +131,15 @@ windowedData = {};
winsMagnetMG = {};

winEnd = length(data{k}.samples{j}.raw{1}) * 10; # *10ms
winBuffer = ((setWindowSize/2)+1) * 10;

for i = 1010:200:winEnd-1010 #200ms steps for sliding/overlapping windows
for i = winBuffer:setWindowSliding:winEnd-winBuffer #steps for sliding/overlapping windows

#accel
winAccel = window(data{k}.samples{j}.raw{1}, i);
winsAccel = [winsAccel winAccel];
winsAccelX = [winsAccelX winAccel(:,1)];
winsAccelY = [winsAccelY winAccel(:,2)];
winsAccelZ = [winsAccelZ winAccel(:,3)];

[coeff1, score1] = princomp(winAccel);
winsAccelPCA = [winsAccelPCA score1(:,1)]; #choose the first axis (eigvec with the biggest eigvalue)
@@ -133,7 +149,9 @@ windowedData = {};

#gyro
winGyro = window(data{k}.samples{j}.raw{2}, i);
winsGyro = [winsGyro winGyro];
winsGyroX = [winsGyroX winGyro(:,1)];
winsGyroY = [winsGyroY winGyro(:,2)];
winsGyroZ = [winsGyroZ winGyro(:,3)];

[coeff2, score2] = princomp(winGyro);
winsGyroPCA = [winsGyroPCA score2(:,1)];
@@ -143,7 +161,9 @@ windowedData = {};

#magnet
winMagnet = window(data{k}.samples{j}.raw{3}, i);
winsMagnet = [winsMagnet winMagnet];
winsMagnetX = [winsMagnetX winMagnet(:,1)];
winsMagnetY = [winsMagnetY winMagnet(:,2)];
winsMagnetZ = [winsMagnetZ winMagnet(:,3)];

[coeff3, score3] = princomp(winMagnet);
winsMagnetPCA = [winsMagnetPCA score3(:,1)];
@@ -154,53 +174,68 @@ windowedData = {};

end

#write back data
windowedData{k}.samples{j}.raw{1}.wins = winsAccel;
windowedData{k}.samples{j}.raw{2}.wins = winsGyro;
windowedData{k}.samples{j}.raw{3}.wins = winsMagnet;
windowedData{k}.samples{j}.raw{4}.wins = winsAccelPCA;
windowedData{k}.samples{j}.raw{5}.wins = winsGyroPCA;
windowedData{k}.samples{j}.raw{6}.wins = winsMagnetPCA;
windowedData{k}.samples{j}.raw{7}.wins = winsAccelMG;
windowedData{k}.samples{j}.raw{8}.wins = winsGyroMG;
windowedData{k}.samples{j}.raw{9}.wins = winsMagnetMG;
windowedData{k}.samples{j}.raw{1}.wins = winsAccelX; #X
windowedData{k}.samples{j}.raw{2}.wins = winsAccelY; #Y
windowedData{k}.samples{j}.raw{3}.wins = winsAccelZ; #Z
windowedData{k}.samples{j}.raw{4}.wins = winsGyroX;
windowedData{k}.samples{j}.raw{5}.wins = winsGyroY;
windowedData{k}.samples{j}.raw{6}.wins = winsGyroZ;
windowedData{k}.samples{j}.raw{7}.wins = winsMagnetX;
windowedData{k}.samples{j}.raw{8}.wins = winsMagnetY;
windowedData{k}.samples{j}.raw{9}.wins = winsMagnetZ;
windowedData{k}.samples{j}.raw{10}.wins = winsAccelPCA;
windowedData{k}.samples{j}.raw{11}.wins = winsGyroPCA;
windowedData{k}.samples{j}.raw{12}.wins = winsMagnetPCA;
windowedData{k}.samples{j}.raw{13}.wins = winsAccelMG;
windowedData{k}.samples{j}.raw{14}.wins = winsGyroMG;
windowedData{k}.samples{j}.raw{15}.wins = winsMagnetMG;
end
end
end
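To get a feel for how many windows the sliding loop in windowData() produces, here is a worked example using the settings.m defaults; the 60 s recording length is an assumed figure, not taken from the data:

winEnd    = 6000 * 10;                              # assumed 6000 samples at 10 ms -> 60000 ms
winBuffer = ((256/2)+1) * 10;                       # 1290 ms, as computed above
numWins   = numel(winBuffer:320:winEnd-winBuffer)   # -> 180 windows per signal and sample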

function features = featureCalculation(data)

features = [];
global setAutocorrelationBinSize;
global setPSDBinSize;
features = [];

for k = 1:numel(data)
for j = 1:numel(data{k}.samples)
for i = 1:numel(data{k}.samples{j}.raw)
for m = 1:numel(data{k}.samples{j}.raw{i}.wins)
currentWindow = data{k}.samples{j}.raw{i}.wins{m};
currentWindow = currentWindow(:,1);

#autocorrelation on the window. split it into 5 evenly spaced bins (the bin edges are evenly spaced, not the number of values per bin) and calculate the mean of each bin.
[autoCorr] = xcorr(currentWindow);
[binNum, binCenter] = hist(autoCorr, 5); #define 5 bins for the data.
[binNum, binCenter] = hist(autoCorr, setAutocorrelationBinSize); #define bins for the data.
binSize = abs(binCenter(end-1) - binCenter(end));
binEdges = linspace(binCenter(1)-(binSize/2), binCenter(end)+(binSize/2), 6);
binEdges = linspace(binCenter(1)-(binSize/2), binCenter(end)+(binSize/2), setAutocorrelationBinSize+1);
[binNumc, binIdx] = histc(autoCorr, binEdges);
binMeans = getBinMean(autoCorr, binIdx, 5);
binMeans = getBinMean(autoCorr, binIdx, setAutocorrelationBinSize);

#calculate the root-mean-square (RMS) of the signal
rms = sqrt(mean(currentWindow.^2));

#power bands 0.5 to 25 Hz (useful if the windows are longer than 4 s and the window sizes are 256, 512, ...)
[powerBand, w] = periodogram(currentWindow); #pads the fft with zeros
powerEdges = logspace(log10(0.5), log10(25), 10 + 2); #logarithmically spaced edges for 10 bins
powerEdges = logspace(log10(0.5), log10(25), setPSDBinSize + 2); #logarithmically spaced edges for setPSDBinSize bins
triFilter = getTriangularFunction(powerEdges, length(powerBand)*2 - 2);
for l = 1:numel(triFilter)
filteredBand = triFilter{l} .* powerBand;
psd(l) = sum(filteredBand); #sum freq (no log and no dct)
end

#statistical features
windowMean = mean(currentWindow);
windowSTD = std(currentWindow); #standard deviation
windowVariance = var(currentWindow);
windowKurtosis = kurtosis(currentWindow); #(ger. Wölbung)
windowIQR = iqr(currentWindow); #interquartile range

#put everything together
classLabel = k; #what class?
features = [features; classLabel, binMeans, rms, psd];
sampleLabel = j; #what sample?
features = [features; sampleLabel, classLabel, binMeans, rms, psd, windowMean, windowSTD, windowVariance, windowKurtosis, windowIQR];
end
end
end
@@ -209,29 +244,34 @@ end

function value = getBinMean(data, idx, numBins)

#search for idx == numBins+1. histc puts a value that equals the last edge into this extra bin.
isSix = find(idx == numBins+1);
if isSix != 0
idx(isSix) = numBins;
endif

value = [];
value = [];
for i = 1:numBins
flagBinMembers = (idx == i);
binMembers = data(flagBinMembers);

# if length(binMembers) == 0
# idx
#data
# input = 'balala'
#endif

value(i) = mean(binMembers);
if length(binMembers) == 0
value(i) = 0;
else
value(i) = mean(binMembers);
endif
end
end
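A tiny worked example of the histc edge case handled in getBinMean() (the numbers are illustrative only):

edges = 0:1:5;                       # 6 edges -> 5 bins
[cnt, idx] = histc([0.5 2.5 5], edges);
# idx is [1 3 6]: the value 5 equals the last edge, so histc reports the extra bin 6,
# which getBinMean() folds back into bin 5; empty bins are returned as 0.
getBinMean([0.5 2.5 5], idx, 5)      # -> [0.5  0  2.5  0  5]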

#triangular functions. (edges of the triangles; num fft values -> nfft.)
function triFilter = getTriangularFunction(edges, nfft)

global samplerateHZ;

#get idx of the edges within the samples. thanks to fft each sample represents a frequency.
# idx * samplerate / nfft = hertz of that idx
for i = 1:length(edges)
edgesByIdx(i) = floor((nfft + 1) * edges(i)/100); #100hz is the samplerate
edgesByIdx(i) = floor((nfft + 1) * edges(i)/samplerateHZ);
end

#generate the triangle filters
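A quick numeric check of that index mapping (a sketch assuming the settings.m defaults: samplerateHZ = 100 and a 256-sample window, so periodogram() returns 129 points and nfft = 129*2 - 2 = 256):

nfft = 256;
powerEdges = logspace(log10(0.5), log10(25), 10 + 2);
edgesByIdx = floor((nfft + 1) * powerEdges / 100)
# -> the 12 edges land on FFT bins 1 ... 64, i.e. roughly 0.5 Hz up to 25 Hz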

@@ -1,4 +1,4 @@
display("rawPlot")
display("Generating Features")

#load and plot raw data
#{
@@ -14,9 +14,8 @@ access the cells using classes{u}.samples{v}.raw{w}
#}
source("functions.m");

trainsetPerClass = 6; #number of used trainsets for one class
classes = {};
classes = getRawTrainData(trainsetPerClass);
classes = getRawTrainData();

#outPath = "/home/toni/Documents/handygames/HandyGames/toni/img/raw"
#plotData(classes, outPath);
@@ -32,12 +31,12 @@ filteredClasses = filterData(classes);
data structure of windowedClasses:
classes[1 to 5]
  samples[1 to trainsetPerClass]
    raw[1 to 9] <--- 9 different signals
      WindowsAccel[2.5s at 200ms]
    raw[1 to 15] <--- 15 different signals
      3x WindowsAccel (X, Y, Z)
        Win, Win, Win, Win ... <--- single matrices
      WindowsGyro[2.5s at 200ms]
      3x WindowsGyro (X, Y, Z)
        Win, Win, Win, Win ... <--- single matrices
      WindowsMagnet[2.5s at 200ms]
      3x WindowsMagnet (X, Y, Z)
        Win, Win, Win, Win ... <--- single matrices

---> add 6 additional signals: PCA and magnitude
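Accessing that structure looks like this (an illustrative sketch; the indices are arbitrary picks, with raw{10} being the accel PCA signal in the new 15-signal layout):

pcaWins  = windowedClasses{5}.samples{2}.raw{10}.wins;  # accel PCA windows of the 2nd jumpingjack sample
firstWin = pcaWins{1};                                  # a single setWindowSize x 1 column vector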

@@ -56,11 +55,13 @@ windowedClasses = windowData(filteredClasses);
#calculated features for the 5 signals (X, Y, Z, MG, PCA) of a sensor
#{
data structure of features
label | feature #1 | 2 | 3 ...

[sampleLabel, classLabel, binMeans, rms, psd, windowMean, windowSTD, windowVariance, windowKurtosis, windowIQR]
#}
features = featureCalculation(windowedClasses);

#train svm
#save features
save features.txt features;
display("saved features into features.txt");

#run svm
BIN toni/octave/libsvmread.mex (Executable file)
Binary file not shown.
BIN toni/octave/libsvmwrite.mex (Executable file)
Binary file not shown.
8 toni/octave/settings.m (Normal file)
@@ -0,0 +1,8 @@
#global trainsetPerClass = 6; #number of used trainsets for one class
global setDataDir = "/home/toni/Documents/handygames/HandyGames/daten/";
global setWindowSize = 256; #in samples per window. even integer!
global setWindowSliding = 320; #in ms (the sampling period is 10 ms, so numSamples*10)
global setAutocorrelationBinSize = 5;
global setPSDBinSize = 10;
global samplerateHZ = 100;
BIN toni/octave/svmpredict.mex (Executable file)
Binary file not shown.
BIN toni/octave/svmtrain.mex (Executable file)
Binary file not shown.
@@ -3,6 +3,7 @@ plot the data first
preprocessing:
low-pass filter (-60 dB at 20 Hz)
windowing (5 s sliding at 200 ms)
Note: the first 4 - 5 samples are already pretty messy. They contain some really nasty values.

segmentation (leave out for now, since the data is not trained for it. We would need training data of non-exercise activity that is then labeled.)
walking, for example, is not an exercise! (you walk around between exercises, after all)
49 toni/octave/training.m (Normal file)
@@ -0,0 +1,49 @@
#train features using svm
display("Train Features")

#load all features
# features = [sampleLabel, classLabel, binMeans, rms, psd, windowMean, windowSTD, windowVariance, windowKurtosis, windowIQR];
load "features.txt"; #the matrix is also named features

# split features into training and test features using the leave-one-out method
# class idx:
# idx 1 -> forwardbend
# idx 2 -> kneebend
# idx 3 -> pushups
# idx 4 -> situps
# idx 5 -> jumpingjack

# define which sampleSet is used as the test set and not for training.
leaveOut = find(features(:,1) == 3 & features(:,2) == 2); #sample set 3, class 2
testFeatures = features(leaveOut, :); #set the test signal
features(leaveOut,:) = []; #remove the testFeatures
features(:,1) = []; #remove the sampleLabel

# bring the feature matrix into libsvm format.
# 1. the label vector:
trainLabel = features(:,1);

# 2. sparse matrix with every feature in one column:
features(:,1) = []; #remove the classLabel
trainFeatures = sparse(features);

# before training we need to scale the feature values
minimums = min(trainFeatures);
ranges = max(trainFeatures) - minimums;

trainFeatures = (trainFeatures - repmat(minimums, size(trainFeatures, 1), 1)) ./ repmat(ranges, size(trainFeatures, 1), 1);
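One case the min-max scaling above does not cover (a hedged aside, not part of the script): a feature column that is constant over the training set has a range of 0, and the division then yields NaN columns. A guard one could place just before the scaling line:

ranges(ranges == 0) = 1;   # hypothetical guard: leave constant columns unscaled instead of dividing by zero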

# training: svm with default settings
model = svmtrain(trainLabel, trainFeatures);

display("Classify Features")
# for testing we need to scale again, using the same minimums and ranges as for training
testLabel = testFeatures(:,2);
testFeatures(:,1:2) = []; #remove the labels
testFeatures = (testFeatures - repmat(minimums, size(testFeatures, 1), 1)) ./ repmat(ranges, size(testFeatures, 1), 1);

# classification
[predict_label, accuracy, dec_values] = svmpredict(testLabel, testFeatures, model);