%%% Please DO NOT EDIT this file - it was automatically generated.
%%% Licentiate Theses from the Department of Information Technology,
%%% Uppsala University, Sweden.
%%% Series ISSN 1404-5117.
%%%
%%% ====================================================================
%%% BibTeX-file{
%%% author = "Bj{\"o}rn Victor",
%%% date = "20 May 2024",
%%% filename = "itlic.bib",
%%% url = "http://www.it.uu.se/research/publications/reports/lic/itlic.bib",
%%% www-home = "http://www.it.uu.se/research/publications/reports/lic/",
%%% address = "Department of Information Technology,
%%% Uppsala University,
%%% Box 337,
%%% SE-751 05 Uppsala, SWEDEN",
%%% telephone = "+46 18 471 0000",
%%% FAX = "+46 18 511925",
%%% email = "Bjorn.Victor at it.uu.se",
%%% dates = {2000--},
%%% keywords = "",
%%% supported = "yes",
%%% supported-by = "Bj{\"o}rn Victor",
%%% abstract = "Licentiate Theses from the Department of
%%% Information Technology at Uppsala University"
%%% }
%%% ====================================================================
@STRING{cb = "Centre for Image Analysis" }
@STRING{csd = "Computing Science Division" }
@STRING{docs = "Division of Computer Systems" }
@STRING{hci = "Division of Human-Computer Interaction" }
@STRING{it = "Department of Information Technology, Uppsala University" }
@STRING{mdi = "Division of Human-Computer Interaction" }
@STRING{syscon = "Division of Systems and Control" }
@STRING{tdb = "Division of Scientific Computing" }
@STRING{vi2 = "Division of Visual Information and Interaction" }
@PhDThesis{ itlic:2024-001,
  author = {Thom Kunkeler},
  title = {Capital and the Social Reproduction of Inequality in
           Computing Education},
  school = it,
  department = vi2,
  year = 2024,
  number = {2024-001},
  type = {Licentiate thesis},
  month = may,
  day = 31,
  note = {Updated 2024-05-20.},
  abstract = {Computing education in Western countries has traditionally
              been characterised by low levels of participation and
              diversity among its student population. In order to broaden
              participation in the field, it is fundamental to understand
              the various mechanisms through which power structures and
              inequality are reproduced. From a Bourdieusian perspective,
              this licentiate thesis sets out to understand the
              interaction between capital, class, and habitus which
              allows a dominant class to thrive at the expense of other
              classes.
              Paper I shows that capital serves as a barrier for
              non-computing students entering the computing field,
              whereas in Paper II a dominant class is identified as
              possessing higher levels of capital, which is then related
              to their higher levels of participation in the field. In
              addition, Paper I provides insight into the ways the
              subordinate class internalises and acts upon their lower
              levels of capital.
              This licentiate thesis lays out the groundwork for studying
              capital in computing education by developing and validating
              research instruments which can be used for further study.
              In addition, relevant theories to educational participation
              are discussed, with a particular focus on capital theory.
              More work is needed to understand the reproductive
              mechanism through which the dominant class legitimises
              their capital within the field of computing education,
              thereby establishing their class position. Future work is
              recommended in the domain of habitus and capital-inclusive
              pedagogy. Ultimately, the goal is to reduce the
              reproduction of inequality in computing education by
              assessing the various mechanisms involved, and designing
              pedagogy which can be used for successful engagement of
              students with varying levels of capital. }
}
@PhDThesis{ itlic:2023-003,
author = {Anh Tung Nguyen},
title = {Security Allocation in Networked Control Systems},
school = it,
department = docs,
year = 2023,
number = {2023-003},
type = {Licentiate thesis},
month = oct,
day = 13,
abstract = {Sustained use of critical infrastructure, such as
electrical power and water distribution networks, requires
efficient management and control. Facilitated by the
advancements in computational devices and non-proprietary
communication technology, such as the Internet, the
efficient operation of critical infrastructure relies on
network decomposition into interconnected subsystems, thus
forming networked control systems. However, the use of
public and pervasive communication channels leaves these
systems vulnerable to cyber attacks. Consequently, the
critical infrastructure is put at risk of suffering
operation disruption and even physical damage that would
inflict financial costs as well as pose a hazard to human
health. Therefore, security is crucial to the sustained
efficient operation of critical infrastructure.
This thesis develops a framework for evaluating and
improving the security of networked control systems in the
face of cyber attacks. The considered security problem
involves two strategic agents, namely a malicious adversary
and a defender, pursuing their specific and conflicting
goals. The defender aims to efficiently allocate defense
resources with the purpose of detecting malicious
activities. Meanwhile, the malicious adversary
simultaneously conducts cyber attacks and remains stealthy
to the defender. We tackle the security problem by
proposing a game-theoretic framework and characterizing its
main components: the payoff function, the action space, and
the available information for each agent. Especially, the
payoff function is characterized based on the
output-to-output gain security metric that fully explores
the worst-case attack impact. Then, we investigate the
properties of the game and how to efficiently compute its
equilibrium. Given the combinatorial nature of the
defender's actions, one important challenge is to alleviate
the computational burden. To overcome this challenge, the
                  thesis contributes several system- and graph-theoretic
conditions that enable the defender to shrink the action
space, efficiently allocating the defense resources. The
effectiveness of the proposed framework is validated
through numerical examples.}
}
@PhDThesis{ itlic:2023-002,
author = {Philipp Pilar},
title = {Integrating Prior Knowledge into Machine Learning Models
with Applications in Physics},
school = it,
department = syscon,
year = 2023,
number = {2023-002},
type = {Licentiate thesis},
month = sep,
day = 20,
abstract = {At the extremes, two antithetical approaches to describing
natural processes exist. Theoretical models can be derived
from first principles, allowing for clear interpretability;
on the downside, this approach may be infeasible or
inefficient for complex systems. Alternatively, methods
from statistical machine learning can be employed to learn
black box models from large amounts of data, while
providing little or no understanding of their inner
workings.
Both approaches have different desirable properties and
weaknesses. It is natural to ask how they may be combined
to create better models. This is the question that the
field of physics-informed machine learning is concerned
with, and which we will consider in this thesis. More
precisely, we investigate ways of integrating additional
prior knowledge into machine learning models.
In Paper I, we consider multitask Gaussian processes and
devise a way to include so-called sum constraints into the
model, where a nonlinear sum of the outputs is required to
equal a known value. In Paper II, we consider the task of
determining unknown parameters from data when solving
partial differential equations (PDEs) with physics-informed
neural networks. Given the prior knowledge that the
measurement noise is homogeneous but otherwise unknown, we
demonstrate that it is possible to learn the solution and
parameters of the PDE jointly with the noise distribution.
In Paper III, we consider generative adversarial networks,
which may produce realistic-looking samples but fail to
reproduce their true distribution. In our work, we mitigate
this issue by matching the true and generated distributions
of statistics extracted from the data.}
}
@PhDThesis{ itlic:2023-001,
author = {H{\aa}kan Runvik},
title = {Modeling and Estimation of Impulsive Biomedical Systems},
school = it,
department = syscon,
year = 2023,
number = {2023-001},
type = {Licentiate thesis},
month = jun,
day = 12,
abstract = {Dynamical systems are often expressed in either continuous
or discrete time. Some biomedical processes are however
more suitably modeled as impulsive systems, which combine
continuous dynamics with abrupt changes of the state of the
system. This thesis concerns two such systems: the
pharmacokinetics of the anti-Parkinson's drug levodopa, and
the testosterone regulation in the human male. Despite the
differences between these systems, they can be modeled in
similar ways. Modeling entails not only the model, but also
the methods used to estimate its parameters. Impulsive
dynamics can enable simpler representations compared with
using continuous dynamics alone, but may also complicate
the estimation procedure, since standard techniques often
cannot be used. The contributions of this thesis are
therefore both in model development and parameter
estimation.
Model development is the topic of Paper I. It presents a
model of the multi-peaking phenomenon in levodopa
pharmacokinetics, which is manifested by secondary
concentration peaks in the blood concentration profile of
the drug. The remaining papers focus on estimation, in a
setup where a sequence of impulses is fed to a linear
plant, whose output is measured. Two estimation techniques
are considered. The first is presented in Paper II and uses
a Laguerre domain representation to estimate the timing and
weights of the impulses. The second combines estimation of
the impulsive input with estimation of the plant
parameters, which represent the elimination rates of
testosterone-regulating hormones. This problem is
particularly challenging since increasing the estimated
elimination rates and the number of impulses generally
improves the model fit, but only models with sparse input
signals are practically useful. Paper III addresses this
issue through a novel regularization method. The
uncertainties in model and measurements encountered when
working with clinical hormone data add another layer of
complexity to the problem; methods for handling such issues
are described in Paper IV.}
}
@PhDThesis{ itlic:2022-003,
author = {Camille Clouard},
title = {Computational Statistical Methods for Genotyping Biallelic
{DNA} Markers from Pooled Experiments},
school = it,
department = tdb,
year = 2022,
number = {2022-003},
type = {Licentiate thesis},
month = nov,
day = 9,
abstract = {The information conveyed by genetic markers such as Single
Nucleotide Polymorphisms (SNPs) has been widely used in
biomedical research for studying human diseases, but also
increasingly in agriculture by plant and animal breeders
for selection purposes. Specific identified markers can act
as a genetic signature that is correlated to certain
characteristics in a living organism, e.g. a sensitivity to
a disease or high-yield traits. Capturing these signatures
with sufficient statistical power often requires large
volumes of data, with thousands of samples to analyze and
possibly millions of genetic markers to screen.
Establishing statistical significance for effects from
genetic variations is especially delicate when they occur
at low frequencies.
The production cost of such marker genotype data is
therefore a critical part of the analysis. Despite recent
technological advances, the production cost can still be
prohibitive and genotype imputation strategies have been
developed for addressing this issue. The genotype
imputation methods have been widely investigated on human
data and to a smaller extent on crop and animal species. In
the case where only few reference genomes are available for
imputation purposes, such as for non-model organisms, the
imputation results can be less accurate. Group testing
strategies, also called pooling strategies, can be
well-suited for complementing imputation in large
populations and decreasing the number of genotyping tests
required compared to the single testing of every
individual. Pooling is especially efficient for genotyping
the low-frequency variants. However, because of the
particular nature of genotype data and because of the
limitations inherent to the genotype testing techniques,
decoding pooled genotypes into unique data resolutions is a
challenge. Overall, the decoding problem with pooled
                  genotypes can be described as an inference problem in
Missing Not At Random data with nonmonotone missingness
patterns.
Specific inference methods such as variations of the
Expectation-Maximization algorithm can be used for
resolving the pooled data into estimates of the genotype
probabilities for every individual. However, the
non-randomness of the undecoded data impacts the outcomes
of the inference process. This impact is propagated to
imputation if the inferred genotype probabilities are to be
devised as input into classical imputation methods for
genotypes. In this work, we propose a study of the specific
characteristics of a pooling scheme on genotype data, as
well as how it affects the results of imputation methods
such as tree-based haplotype clustering or coalescent
models.}
}
@PhDThesis{ itlic:2022-002,
author = {Gustaf Borgstr{\"o}m},
title = {Making Sampled Simulations Faster by Minimizing Warming
Time},
school = it,
department = docs,
year = 2022,
number = {2022-002},
type = {Licentiate thesis},
month = oct,
day = 28,
abstract = {A computer system simulator is a fundamental tool for
computer architects to try out brand new ideas or explore
the system's response to different configurations when
executing different program codes. However, even simulating
the CPU core in detail is time-consuming as the execution
rate slows down by several orders of magnitude compared to
native execution. To solve this problem, previous work,
namely SMARTS, demonstrates a statistical sampling
methodology that records measurements only from tiny
samples throughout the simulation. It spends only a
fraction of the full simulation time on these sample
measurements. In-between detailed sample simulations,
SMARTS fast-forwards in the simulation using a greatly
simplified and much faster simulation model (compared to
full detail), which maintains only necessary parts of the
architecture, such as cache memory. This maintenance
process is called warming. While warming is mandatory to
keep the simulation accuracy high, caches may be
sufficiently warm for an accurate simulation long before
reaching the sample. In other words, much time may be
wasted on warming in SMARTS.
In this work, we show that caches can be kept in an
accurate state with much less time spent on warming. The
first paper presents Adaptive Cache Warming, a methodology
for identifying the minimum amount of warming in an
iterative process for every SMARTS sample. The rest of the
simulation time, previously spent on warming, can be
skipped by fast-forwarding between samples using native
hardware execution of the code. Doing so will thus result
in significantly faster statistically sampled simulation
while maintaining accuracy. The second paper presents Cache
Merging, which mitigates the redundant warmings introduced
in Adaptive Cache Warming. We solve this issue by going
back in time and merging the existing warming with a cache
warming session that comes chronologically before the
existing warming. By removing the redundant warming, we
yield even more speedup. Together, Adaptive Cache Warming
and Cache Merging is a powerful boost for statistically
sampled simulations.}
}
@PhDThesis{ itlic:2022-001,
author = {Sam Hylamia},
title = {Secure In-body Communication and Sensing},
school = it,
department = docs,
year = 2022,
number = {2022-001},
type = {Licentiate thesis},
month = oct,
day = 26,
abstract = {Implantable medical devices (IMDs) such as cardiac
implants and insulin pumps provide patients with lifesaving
functions and improve their lives. These properties make
them an integral part of medical professionals' toolbox.
Today, IMDs which can be controlled or adjusted wirelessly
are widely adopted and are becoming increasingly connected
to each other and to the internet. While the modern
communication properties of IMDs provide substantial
benefits, they pose a major cybersecurity risk when devices
are not secured adequately.
In this thesis, we explore security issues related to the
communication and sensing capabilities of modern on-body
devices such as IMDs. In particular, we investigate
authentication and key agreement in a network of body-worn
devices, and address the privacy of in-body continuous
sensing and monitoring.
The main contributions of this thesis are twofold: (1) We
propose and evaluate Tiek, an authentication and key
distribution protocol for networked body-worn devices. Tiek
authenticates the presence of participating devices on the
body and distributes cryptographic keys to them using
                  environment-based sources of randomness. The protocol
utilizes a two-tier authorization scheme to restrict the
access of mal-behaving body-worn participants to the
network. (2) We also study the information leakage
associated with the deployment of a novel in-body
continuous monitoring technique. We target the information
leakage from the sensing process, and propose and evaluate
privacy enhancing measures that prevent a passive
eavesdropper from violating the privacy of the patient. We
believe this thesis contributes to the development of
secure on-body devices in general and IMDs in particular.}
}
@PhDThesis{ itlic:2021-002,
  author = {Karl Bengtsson Bernander},
  title = {Improving Training of Deep Learning for Biomedical Image
           Analysis and Computational Physics},
  school = it,
  department = vi2,
  year = 2021,
  number = {2021-002},
  type = {Licentiate thesis},
  month = dec,
  day = 22,
  abstract = {The previous decade has seen breakthroughs in image
              analysis and computer vision, mainly due to machine
              learning methods known as deep learning. These methods have
              since spread to other fields. This thesis aims to survey
              the progress, highlight problems related to data and
              computations, and show techniques to mitigate them.
              In Paper I, we show how to modify the VGG16 classifier
              architecture to be equivariant to transformations in the
              p4 group, consisting of translations and specific
              rotations. We conduct experiments to investigate if
              baseline architectures, using data augmentation, can be
              replaced with these rotation-equivariant networks. We train
              and test on the Oral cancer dataset, used to automate
              cancer diagnostics.
              In Paper III, we use a similar methodology as in Paper I to
              modify the U-net architecture combined with a
              discriminative loss, for semantic instance segmentation. We
              test the method on the BBBC038 dataset consisting of highly
              varied images of cell nuclei.
              In Paper II, we look at the UCluster method, used to group
              subatomic particles in particle physics. We show how to
              distribute the training over multiple GPUs using
              distributed deep learning in a cloud environment.
              The papers show how to use limited training data more
              efficiently, using group-equivariant convolutions, to reduce
              the problems of overfitting. They also demonstrate how to
              distribute training over multiple nodes in computational
              centers, which is needed to handle growing data sizes.}
}
@PhDThesis{ itlic:2021-001,
author = {Niklas Gunnarsson},
title = {On the Registration and Modeling of Sequential Medical
Images},
school = it,
department = syscon,
year = 2021,
number = {2021-001},
type = {Licentiate thesis},
day = 16,
month = dec,
abstract = {Real-time imaging can be used to monitor, analyze and
control medical treatments. In this thesis, we want to
explain the spatiotemporal motion and thus enable more
advanced procedures, especially real-time adaptation in
radiation therapy. The motion occurring between image
acquisitions can be quantified by image registration, which
generates a mapping between the images.
The contribution of the thesis consists of three papers,
where we have used different approaches to estimate the
motion between images.
In Paper I, we combine a state-of-the-art method in
real-time tracking with a learned sparse-to-dense
interpolation scheme. For this, we track an arbitrary
number of regions in a sequence of medical images. We
estimated a sparse displacement field, based on the
tracking positions and used the interpolation network to
achieve its dense representation.
Paper II was a contribution to a challenge in learnable
image registration where we finished at 2nd place. Here we
train a deep learning method to estimate the dense
displacement field between two images. For this, we used a
network architecture inspired by both conventional medical
image registration methods and optical flow in computer
vision.
For Paper III, we estimate the dynamics of spatiotemporal
images by training a generative network. We use nonlinear
dimensional reduction techniques and assume a linear
dynamic in a low-dimensional latent space. In comparison
with conventional image registration methods, we provide a
method more suitable for real-world scenarios, with the
possibility of imputation and extrapolation.
Although the problem is challenging and several questions
are left unanswered we believe a combination of
conventional, learnable, and dynamic modeling of the motion
is the way forward. }
}
@PhDThesis{ itlic:2020-006,
author = {David Widmann},
title = {Calibration of Probabilistic Predictive Models},
school = it,
department = syscon,
year = 2020,
number = {2020-006},
type = {Licentiate thesis},
month = oct,
day = 28,
abstract = {Predicting unknown and unobserved events is a common task
in many domains. Mathematically, the uncertainties arising
in such prediction tasks can be described by probabilistic
predictive models. Ideally, the model estimates of these
uncertainties allow us to distinguish between uncertain and
trustworthy predictions. This distinction is particularly
important in safety-critical applications such as medical
image analysis and autonomous driving.
For the probabilistic predictions to be meaningful and to
allow this differentiation, they should neither be over-
nor underconfident. Models that satisfy this property are
called calibrated. In this thesis we study how one can
measure, estimate, and statistically reason about the
calibration of probabilistic predictive models.
In Paper I we discuss existing approaches for evaluating
calibration in multi-class classification. We mention
potential pitfalls and suggest hypothesis tests for the
statistical analysis of model calibration.
In Paper II we propose a framework of calibration measures
for multi-class classification. It captures common existing
measures and includes a new kernel calibration error based
on matrix-valued kernels. For the kernel calibration error
consistent and unbiased estimators exist and asymptotic
hypothesis tests for calibration can be derived.
Unfortunately, by construction the framework is limited to
prediction problems with finite discrete target spaces.
In Paper III we use a different approach to develop a more
general framework of calibration errors that applies to any
probabilistic predictive model and is not limited to
classification. We show that it coincides with the
framework presented in Paper II for multi-class
classification. Based on scalar-valued kernels, we
generalize the kernel calibration error, its estimators,
and hypothesis tests to all probabilistic predictive
models. For real-valued regression problems we present
empirical results.}
}
@PhDThesis{ itlic:2020-005,
author = {Anna Wigren},
title = {Exploiting Conjugacy in State-Space Models with Sequential
{M}onte {C}arlo},
school = it,
department = syscon,
year = 2020,
number = {2020-005},
type = {Licentiate thesis},
month = may,
day = 14,
abstract = {Many processes we encounter in our daily lives are
dynamical systems that can be described mathematically
using state-space models. Exact inference of both states
and parameters in these models is, in general, intractable.
Instead, approximate methods, such as sequential Monte
Carlo and Markov chain Monte Carlo, are used to infer
quantities of interest. However, sample-based inference
inherently introduces variance in the estimates. In this
thesis we explore different aspects of how conjugacy
relations in a model can improve the performance of
sequential Monte Carlo-based inference methods.
A conjugacy relation between the prior distribution and the
likelihood implies that the posterior distribution has the
same distributional form as the prior, allowing for
analytic updates in place of numerical integration. In
Paper I we consider state inference in state-space models
where the transition density is intractable. By adding
artificial noise conjugate to the observation density we
can design an efficient proposal for sequential Monte Carlo
inference that can reduce the variance of the state
estimates. Conjugacy can also be utilized in the setting of
parameter inference. In Paper II we show that the
performance of particle Gibbs-type samplers, in terms of
the autocorrelation of the samples, can be improved when
conjugacy relations allow for marginalizing out the
dependence on parameters in the state update.
Despite enabling analytical evaluation of integrals, the
derivation and implementation of conjugacy updates is
cumbersome in all but the simplest cases, which limits the
usefulness in practice. Recently, the emerging field of
probabilistic programming has changed this, by providing a
framework for automating inference in probabilistic models
-- including identifying and utilizing conjugacy relations.
In Paper II we make use of probabilistic programming to
automatically exploit conjugacy in an epidemiological
state-space model describing the spread of dengue fever. }
}
@PhDThesis{ itlic:2020-004,
author = {Muhammad Osama},
title = {Machine Learning for Spatially Varying Data},
school = it,
department = syscon,
year = 2020,
number = {2020-004},
type = {Licentiate thesis},
month = apr,
day = 22,
abstract = {Many physical quantities around us vary across space or
space-time. An example of a \emph{spatial} quantity is
provided by the temperature across Sweden on a given day
and as an example of a \emph{spatio-temporal} quantity we
observe the counts of the corona virus cases across the
globe. Spatial and spatio-temporal data enable
opportunities to answer many important questions. For
example, what the weather would be like tomorrow or where
the highest risk for occurrence of a disease is in the next
few days? Answering questions such as these requires
formulating and learning statistical models.
One of the challenges with spatial and spatio-temporal data
is that the size of data can be extremely large which makes
learning a model computationally costly. There are several
means of overcoming this problem by means of matrix
manipulations and approximations. In paper I, we propose a
solution to this problem where the model is learned in a
streaming fashion i.e. as the data arrives point by point.
This also allows for efficient updating of the learned
model based on newly arriving data which is very pertinent
to spatio-temporal data.
Another interesting problem in the spatial context is to
study the causal effect that an exposure variable has on a
response variable. For instance, policy makers might be
interested in knowing whether increasing the number of
police in a district has the desired effect of reducing
crimes there. The challenge here is that of \emph{spatial
confounding}. A spatial map of the number of police against
the spatial map of the number of crimes in different
districts might show a clear association between these two
quantities. However, there might be a third unobserved
confounding variable that makes both quantities small and
large together. In paper II, we propose a solution for
estimating causal effects in the presence of such a
confounding variable.
Another common type of spatial data is \emph{point} or
\emph{event} data, i.e., the occurrence of events across
space. The event could for example be a reported disease or
crime and one may be interested in predicting the counts of
the event in a given region. A fundamental challenge here
is to quantify the uncertainty in the predicted counts in a
model in a robust manner. In paper III, we propose a
regularized criterion for learning a predictive model of
counts of events across spatial regions. The regularization
ensures tighter prediction intervals around the predicted
                  counts that have valid coverage irrespective of the degree
of model misspecification. }
}
@PhDThesis{ itlic:2020-003,
author = {Christos Sakalis},
title = {Securing the Memory Hierarchy from Speculative
Side-Channel Attacks},
school = it,
department = docs,
year = 2020,
number = {2020-003},
type = {Licentiate thesis},
month = mar,
day = 6,
abstract = {Modern high-performance CPUs depend on speculative
out-of-order execution in order to offer high performance
while also remaining energy efficient. However, with the
introduction of Meltdown and Spectre in the beginning of
2018, speculative execution has been under attack. These
attacks, and the many that followed, take advantage of the
unchecked nature of speculative execution and the
microarchitectural changes it causes in order to mount
speculative side-channel attacks. Such attacks can bypass
software and hardware barriers and gain access to sensitive
information while remaining invisible to the application.
In this thesis we will describe our work on preventing
speculative side-channel attacks that exploit the memory
hierarchy as their side-channel. Specifically, we will
discuss two different approaches, one that does not
restrict speculative execution but tries to keep its
microarchitectural side-effects hidden, and one where we
delay speculative memory accesses if we determine that they
might lead to information leakage. We will discuss the
advantages and disadvantages of both approaches, compare
them against other state-of-the-art solutions, and show
that it is possible to achieve secure, invisible
speculation while at the same time maintaining high
performance and efficiency. }
}
@PhDThesis{ itlic:2020-002,
author = {Ulrika Sundin},
title = {Global Radial Basis Function Collocation Methods for
{PDE}s},
school = it,
department = tdb,
year = 2020,
number = {2020-002},
type = {Licentiate thesis},
month = mar,
day = 20,
abstract = {Radial basis function (RBF) methods are meshfree, i.e.,
they can operate on unstructured node sets. Because the
only geometric information required is the pairwise
distance between the node points, these methods are highly
flexible with respect to the geometry of the computational
domain. The RBF approximant is a linear combination of
translates of a radial function, and for PDEs the
coefficients are found by applying the PDE operator to the
approximant and collocating with the right hand side data.
Infinitely smooth RBFs typically result in exponential
convergence for smooth data, and they also have a shape
parameter that determines how flat or peaked they are, and
that can be used for accuracy optimization. In this thesis
the focus is on global RBF collocation methods for PDEs,
i.e., methods where the approximant is constructed over the
whole domain at once, rather than built from several local
approximations. A drawback of these methods is that they
produce dense matrices that also tend to be ill-conditioned
for the shape parameter range that might otherwise be
optimal. One current trend is therefore to use
over-determined systems and least squares approximations as
this improves stability and accuracy. Another trend is to
use localized RBF methods as these result in sparse
matrices while maintaining a high accuracy. Global RBF
collocation methods together with RBF interpolation
methods, however, form the foundation for these other
versions of RBF--PDE methods. Hence, understanding the
behaviour and practical aspects of global collocation is
still important. In this thesis an overview of global RBF
collocation methods is presented, focusing on different
versions of global collocation as well as on method
properties such as error and convergence behaviour,
approximation behaviour in the small shape parameter range,
and practical aspects including how to distribute the nodes
and choose the shape parameter value. Our own research
illustrates these different aspects of global RBF
collocation when applied to the Helmholtz equation and the
Black--Scholes equation.}
}
@PhDThesis{ itlic:2019-007,
author = {Carl Andersson},
title = {Deep Learning Applied to System Identification: A
Probabilistic Approach},
school = it,
department = syscon,
year = 2019,
number = {2019-007},
type = {Licentiate thesis},
month = dec,
day = 4,
abstract = {Machine learning has been applied to sequential data for a
long time in the field of system identification. As deep
learning grew during the late 00's, machine learning was
again applied to sequential data but from a new angle, not
utilizing much of the knowledge from system identification.
Likewise, the field of system identification has yet to
adopt many of the recent advancements in deep learning.
This thesis is a response to that. It introduces the field
of deep learning in a probabilistic machine learning
setting for problems known from system identification.
Our goal for sequential modeling within the scope of this
thesis is to obtain a model with good predictive and/or
generative capabilities. The motivation behind this is that
such a model can then be used in other areas, such as
control or reinforcement learning. The model could also be
used as a stepping stone for machine learning problems or
for pure recreational purposes.
Paper I and Paper II focus on how to apply deep learning to
common system identification problems. Paper I introduces a
novel way of regularizing the impulse response estimator
for a system. In contrast to previous methods using
Gaussian processes for this regularization we propose to
parameterize the regularization with a neural network and
train this using a large dataset. Paper II introduces deep
learning and many of its core concepts for a system
identification audience. In the paper we also evaluate
several contemporary deep learning models on standard
system identification benchmarks. Paper III is the odd fish
in the collection in that it focuses on the mathematical
formulation and evaluation of calibration in classification
especially for deep neural networks. The paper proposes a
new formalized notation for calibration and some novel
ideas for evaluation of calibration. It also provides some
experimental results on calibration evaluation. }
}
@PhDThesis{ itlic:2019-006,
author = {Kristiina Ausmees},
title = {Efficient Computational Methods for Applications in
Genomics},
school = it,
department = tdb,
year = 2019,
number = {2019-006},
month = nov,
type = {Licentiate thesis},
abstract = {During the last two decades, advances in molecular
technology have facilitated the sequencing and analysis of
ancient DNA recovered from archaeological finds,
contributing to novel insights into human evolutionary
history. As more ancient genetic information has become
available, the need for specialized methods of analysis has
also increased. In this thesis, we investigate statistical
and computational models for analysis of genetic data, with
a particular focus on the context of ancient DNA.
The main focus is on imputation, or the inference of
missing genotypes based on observed sequence data. We
present results from a systematic evaluation of a common
imputation pipeline on empirical ancient samples, and show
that imputed data can constitute a realistic option for
population-genetic analyses. We also discuss preliminary
results from a simulation study comparing two methods of
phasing and imputation, which suggest that the parametric
Li and Stephens framework may be more robust to extremely
low levels of sparsity than the parsimonious Browning and
Browning model.
An evaluation of methods to handle missing data in the
application of PCA for dimensionality reduction of genotype
data is also presented. We illustrate that non-overlapping
sequence data can lead to artifacts in projected scores,
and evaluate different methods for handling unobserved
genotypes.
In genomics, as in other fields of research, increasing
sizes of data sets are placing larger demands on efficient
data management and compute infrastructures. The last part
of this thesis addresses the use of cloud resources for
facilitating such analysis. We present two different
cloud-based solutions, and exemplify them on applications
from genomics.}
}
@PhDThesis{ itlic:2019-005,
author = {Carl Jidling},
title = {Tailoring {G}aussian Processes for Tomographic
Reconstruction},
school = it,
department = syscon,
year = 2019,
number = {2019-005},
type = {Licentiate thesis},
month = oct,
day = 18,
abstract = {A probabilistic model reasons about physical quantities as
random variables that can be estimated from measured data.
The Gaussian process is a respected member of this family,
being a flexible non-parametric method that has proven
strong capabilities in modelling a wide range of nonlinear
functions. This thesis focuses on advanced Gaussian process
techniques; the contribution consists of practical
methodologies primarily intended for inverse tomographic
applications.
In our most theoretical formulation, we propose a
constructive procedure for building a customised covariance
function given any set of linear constraints. These are
explicitly incorporated in the prior distribution and
thereby guaranteed to be fulfilled by the prediction.
One such construction is employed for strain field
reconstruction, to which end we successfully introduce the
Gaussian process framework. A particularly well-suited
spectral based approximation method is used to obtain a
significant reduction of the computational load. The
formulation has seen several subsequent extensions,
represented in this thesis by a generalisation that
includes boundary information and uses variational
inference to overcome the challenge provided by a nonlinear
measurement model.
We also consider X-ray computed tomography, a field of high
importance primarily due to its central role in medical
treatments. We use the Gaussian process to provide an
alternative interpretation of traditional algorithms and
demonstrate promising experimental results. Moreover, we
turn our focus to \emph{deep kernel learning}, a special
construction in which the expressiveness of a standard
covariance function is increased through a neural network
input transformation. We develop a method that makes this
approach computationally feasible for integral
measurements, and the results indicate a high potential for
computed tomography problems.}
}
@PhDThesis{ itlic:2019-004,
author = {Amin Kaveh},
title = {Local Measures for Probabilistic Networks},
school = it,
department = csd,
year = 2019,
number = {2019-004},
type = {Licentiate thesis},
month = sep,
day = 26,
abstract = {Modeling and analysis of imperfection in network data is
essential in many applications such as protein-protein
interaction networks, ad-hoc networks and social influence
networks. In the study of imperfect network data, three
issues have to be considered: first the type of
imperfection, second the aspects of networks such as
existence of nodes/edges or attributes of nodes/edges in
which imperfection occurs and third the theory that has
been used to represent imperfection. This thesis, first,
reviews the different types of imperfection and
consolidates the meaning of the terms used in literature.
Second, it discusses network aspects and theories through
which imperfect network data is represented and analyzed.
Amongst all, the most applied model is uncertainty about
existence of edges which is represented using probability
theory, called probabilistic networks. Third, this thesis
surveys queries and algorithms which have been applied over
probabilistic networks.
Fourth and the main focus of this dissertation is to look
deeply at nodes' local properties in probabilistic
networks. In our first contribution we have shown that two
nodes with the same expected degree can have different
properties. In this work we have highlighted the role of
other summary information of degree distribution such as
variance and skewness in addition to the expected value. In
our second contribution, we have introduced two possible
definitions of probabilistic ego networks and we have
studied the concepts of degree, ego betweenness and ego
closeness.
One of the main applications of the proposed local
properties could be in the \textit{sparsification} process,
in which a network's edges and the probability of the edges
are altered, but nodes' local properties are preserved.}
}
@PhDThesis{ itlic:2019-003,
author = {Viktor Bro},
title = {Volterra Modeling of the Human Smooth Pursuit System in
Health and Disease},
school = it,
department = syscon,
year = 2019,
number = {2019-003},
type = {Licentiate thesis},
month = may,
day = 13,
abstract = {This thesis treats the identification of Volterra models
of the human smooth pursuit system from eye-tracking data.
Smooth pursuit movements are gaze movements used in
tracking of moving targets and controlled by a complex
biological network involving the eyes and brain. Because of
the neural control of smooth pursuit, these movements are
affected by a number of neurological and mental conditions,
such as Parkinson's disease. Therefore, by constructing
mathematical models of the smooth pursuit system from
eye-tracking data of the patient, it may be possible to
identify symptoms of the disease and quantify them. While
the smooth pursuit dynamics are typically linear in healthy
subjects, this is not necessarily true in disease or under
influence of drugs. The Volterra model is a classical
black-box model for dynamical systems with smooth
nonlinearities that does not require much \textit{a priori}
information about the plant and is thus suitable for modeling
the smooth pursuit system.
The contribution of this thesis is mainly covered by the
four appended papers. Papers~I--III treat the problem of
reducing the number of parameters in Volterra models with
the kernels parametrized in Laguerre functional basis
(Volterra-Laguerre models), when utilizing them to capture
the signal form of smooth pursuit movements. Specifically,
a Volterra-Laguerre model is obtained by means of sparse
estimation and principal component analysis in Paper~I, and
a Wiener model approach is used in Paper~II. In Paper~III,
the same model as in Paper~I is considered to examine the
feasibility of smooth pursuit eye tracking for biometric
purposes. Paper~IV is concerned with a Volterra-Laguerre
model that includes an explicit time delay. An approach to
the joint estimation of the time delay and the
finite-dimensional part of the Volterra model is proposed
and applied to time-delay compensation in eye-tracking
data. }
}
@PhDThesis{ itlic:2019-002,
author = {Anton G. Artemov},
title = {Inverse Factorization in Electronic Structure Theory:
Analysis and Parallelization},
school = it,
department = tdb,
year = 2019,
number = {2019-002},
type = {Licentiate thesis},
month = jun,
day = 5,
abstract = {This licentiate thesis is a part of an effort to run large
electronic structure calculations in modern computational
environments with distributed memory. The ultimate goal is
to model materials consisting of millions of atoms at the
level of quantum mechanics. In particular, the thesis
focuses on different aspects of a computational problem of
inverse factorization of Hermitian positive definite
matrices. The considered aspects are numerical properties
of the algorithms and parallelization. Not only is an
efficient and scalable computation of inverse factors
necessary in order to be able to run large scale electronic
computations based on the Hartree-Fock or Kohn-Sham
approaches with the self-consistent field procedure, but it
can be applied more generally for preconditioner
construction. Parallelization of algorithms with unknown
load and data distributions requires a paradigm shift in
programming. In this thesis we also discuss a few parallel
programming models with focus on task-based models, and,
more specifically, the Chunks and Tasks model.}
}
@PhDThesis{ itlic:2019-001,
author = {Diane Golay},
title = {An Invisible Burden: An Experience-Based Approach to
Nurses' Daily Work Life with Healthcare Information
Technology},
school = it,
department = vi2,
year = 2019,
number = {2019-001},
type = {Licentiate thesis},
month = mar,
day = 22,
abstract = {Information and Communication Technology (ICT) has been an
increasingly pervasive component of most workplaces
throughout the past half century. In healthcare, the turn
to the digital has resulted in the broad implementation
of Healthcare Information Technology (HIT). The impacts of
ICT on work life have been investigated predominantly
through surveys, although some researchers have advocated
for the use of a qualitative, experience-based approach.
Meanwhile, the existing body of research on the impacts of
HIT on clinicians has painted a mixed picture of
digitalization. Despite some clear benefits, HIT has indeed
been found to have unexpected, unintended adverse
consequences for hospital staff. Typical issues include
loss in efficiency, extra effort to carry out routine
tasks, and the creation of new, HIT-induced work
activities. Simultaneously, research outside of the
healthcare domain has shown that ICT could require extra
effort from some users in order for the sociotechnical
system to function properly - extra work often invisible to
developers. Based on observation, interview and focus group
data collected at a large Swedish hospital, this thesis set
out to investigate the impact of HIT on hospital nurses
from an experience-based perspective, resulting in four
main contributions. First, a method supporting
experience-based data analysis, the HolisticUX method, is
introduced. Second, 13 forms of HIT-induced additional
tasks in nurses' workload are identified, five of which are
not acknowledged in previous research. Third, task
avoidance is identified as a consequence of nurses'
increased workload, negatively affecting patient safety,
care quality and nurses' professional satisfaction.
Finally, four factors are argued to contribute to a
suggested invisibility of the HIT-induced time burden in
nurses' work life to management and developers: 1) lack of
a holistic perspective, 2) the hidden cost of a single
click, 3) the invisibility of nursing work, and 4) visible
data, invisible work.}
}
@PhDThesis{ itlic:2018-004,
author = {Charalampos Orfanidis},
title = {Robustness in Low Power Wide Area Networks},
school = it,
department = docs,
year = 2018,
number = {2018-004},
type = {Licentiate thesis},
month = jun,
day = 14,
abstract = {During the past few years we have witnessed an emergence
of Wide Area Networks in the Internet of Things area. There
are several new technologies like LoRa, Wi-SUN, Sigfox,
that offer long range communication and low power for
low-bitrate applications. These new technologies enable new
application scenarios, such as smart cities, smart
agriculture, and many more. However, when these networks
co-exist in the same frequency band, they may cause
problems to each other since they are heterogeneous and
independent. Therefore it is very likely to have frame
collisions between the different networks.
In this thesis we first explore how tolerant these networks
are to Cross Technology Interference (CTI). CTI can be
described as the interference from heterogeneous wireless
technologies that share the same frequency band and is able
to affect the robustness and reliability of the network. In
particular, we select two of them, LoRa and Wi-SUN and
carry out a series of experiments with real hardware using
several configurations. In this way, we quantify the
tolerance of cross technology interference of each network
against the other as well as which configuration settings
are important.
The next thing we explored is how well channel sensing
mechanisms can detect the other network technologies and
how they can be improved. For exploring these aspects, we
used the default Clear Channel Assessment (CCA) mechanism
of Wi-SUN against LoRa interference and we evaluated how
accurate it is. We also improved this mechanism in order to
have higher accuracy detection against LoRa interference.
Finally, we propose an architecture for WSNs which will
enable flexible re-configuration of the nodes. The idea is
based on Software Defined Network (SDN) principles and
could help in our case by re-configuring a node in order to
mitigate the cross-technology interference from other
networks.}
}
@PhDThesis{ itlic:2018-003,
author = {Fredrik Olsson},
title = {Modeling and Assessment of Human Balance and Movement
Disorders Using Inertial Sensors},
school = it,
department = syscon,
year = 2018,
number = {2018-003},
type = {Licentiate thesis},
month = may,
day = 29,
abstract = {Inertial sensors and magnetometers are abundant in today's
society, where they can be found in many of our everyday
electronic devices, such as smart phones or smart watches.
Their primary function is to measure the movement and
orientation of the device and provide this information for
the apps that request it.
This licentiate thesis explores the use of these types of
sensors in biomedical applications. Specifically, how these
sensors can be used to analyze human movement and work as a
tool for assessment of human balance and movement
disorders. The methods presented in this thesis deal with
mathematical modeling of the sensors, their relationship to
the biomechanical models that are used to describe the
dynamics of human movement and how we can combine these
models to describe the mechanisms behind human balance and
quantify the symptoms of movement disorders.
The main contributions come in the form of four papers. A
practical calibration method for accelerometers is
presented in Paper I, that deals with compensation of
intrinsic sensor errors that are common for relatively
cheap sensors that are used in e.g. smart phones. In Paper
II we present an experimental evaluation and minor
extension of methods that are used to determine the
position of the joints in the biomechanical model, using
inertial sensor data alone. Paper III deals with system
identification of nonlinear controllers operating in closed
loop, which is a method that can be used to model the
neuromuscular control mechanisms behind human balance. In
Paper IV we propose a novel method for quantification of
hand tremor, a primary symptom of neurological disorders
such as Parkinson's disease (PD) or Essential tremor (ET),
where we make use of data collected from sensors in a smart
phone. The thesis also contains an introduction to the
sensors, biomechanical modeling, neuromuscular control and
the various estimation and modeling techniques that are
used throughout the thesis.}
}
@PhDThesis{ itlic:2018-002,
author = {Tatiana Chistiakova},
title = {Ammonium Based Aeration Control in Wastewater Treatment
Plants - Modelling and Controller Design},
school = it,
department = syscon,
year = 2018,
number = {2018-002},
type = {Licentiate thesis},
month = apr,
day = 12,
abstract = {Wastewater treatment involves many processes and methods
which make a treatment plant a large-scaled and complex
system. A fundamental challenge is how to maintain a high
process efficiency while keeping the operational costs low.
The variety in plant configurations, the nonlinear
behaviour, the large time delays and saturations present in
the system contribute to making automation and monitoring a
demanding task.
The biological part of a wastewater treatment process
includes an aeration of the water and this process has been
shown to often result in the highest energy consumption of
the plant. Oxygen supply is a fundamental part of the
activated sludge process used for aerobic microorganisms
growing. The concentration of the dissolved oxygen should
be high enough to maintain a sufficient level of biological
oxidation. However, if the concentration is too high the
process efficiency is significantly reduced leading to a
too high energy consumption. Hence, there are two
motivations behind the aeration control task: process
efficiency and economy. One of the possible strategies to
adjust the dissolved oxygen level in a nitrifying activated
sludge process is to use ammonium feedback measurements.
In this thesis, an activated sludge process is modelled and
analysed in terms of dissolved oxygen to ammonium dynamics.
First, the data obtained from a simplified Benchmark
Simulation Model no.1 was used to identify the system. Both
linear and nonlinear models were evaluated. A model with a
Hammerstein structure where the nonlinearity was described
by a Monod function was chosen for a more thorough study.
Here, a feedback controller was designed to achieve
$\mathcal{L}_2$-stability. The stability region was
pre-computed to determine the maximum allowed time delay
for the closed loop system. Finally, a feedforward
controller was added to the system, and shown to
significantly improve the disturbance rejection
properties.}
}
@PhDThesis{ itlic:2018-001,
author = {Kim-Anh Tran},
title = {Static Instruction Scheduling for High Performance on
Energy-Efficient Processors},
school = it,
department = docs,
year = 2018,
number = {2018-001},
type = {Licentiate thesis},
month = jan,
day = 17,
abstract = {New trends such as the internet-of-things and smart homes
push the demands for energy-efficiency. Choosing
energy-efficient hardware, however, often comes as a
trade-off to high-performance. In order to strike a good
balance between the two, we propose software solutions to
tackle the performance bottlenecks of small and
energy-efficient processors.
One of the main performance bottlenecks of processors is
the discrepancy between processor and memory speed, known
as the memory wall. While the processor executes
instructions at a high pace, the memory is too slow to
provide data in a timely manner, if data has not been
cached in advance. Load instructions that require an access
to memory are thereby referred to as long-latency or
delinquent loads. Long latencies caused by delinquent loads
are putting a strain on small processors, which have few or
no resources to effectively hide the latencies. As a
result, the processor may stall.
In this thesis we propose compile-time transformation
techniques to mitigate the penalties of delinquent loads on
small out-of-order processors, with the ultimate goal to
avoid processor stalls as much as possible. Our code
transformation is applicable for general-purpose code,
including unknown memory dependencies, complex control flow
and pointers. We further propose a software-hardware
co-design that combines the code transformation technique
with lightweight hardware support to hide latencies on a
stall-on-use in-order processor. }
}
@PhDThesis{ itlic:2017-003,
author = {Oscar Samuelsson},
title = {Fault Detection in Water Resource Recovery Facilities},
school = it,
department = syscon,
year = 2017,
number = {2017-003},
type = {Licentiate thesis},
month = oct,
day = 20,
abstract = {Reliable sensor values are important for
resource-efficient control and operations of wastewater
treatment processes. Automatic fault detection methods are
necessary to monitor the increasing amount of data produced
in any modern water resource recovery facility (WRRF). Most
on-line measurements exhibit large variations under normal
conditions, due to considerable variations in the influent
flow. The work reported in this licentiate thesis deals
with fault detection in WRRFs.
In the first paper, we studied how Gaussian process
regression (GPR), a probabilistic machine learning method,
could be applied for fault detection in WRRFs. The results
showed that the standard parameter estimation method for
GPR suffered from local optima which could be solved by
instead estimating the distribution of the parameters with
a sequential Monte Carlo algorithm (GPR-SMC). The GPR-SMC
allowed for automatic estimation of missing data in a
simulated influent flow signal with high noise, which is a
representative signal for on-line sensors in WRRFs. In
addition, the GPR-SMC provided uncertainty predictions for
the estimated data and accurate sensor noise estimates.
Care should be taken in selecting a suitable kernel for
GPR, since the results were in contrast to the general
assumption that prior knowledge can easily be encoded by
means of selecting a proper kernel. Here, the
autocorrelation graph was found useful as diagnostic tool
for selecting a proper kernel.
In the second paper, we studied how active fault detection
(AFD) could be used to reveal information about the sensor
status. The AFD was implemented by evaluating the change
in a dissolved oxygen (DO)-signal caused by the sensor's
automatic cleaning system. Fault signatures were obtained
for fouling and several other sensor faults such as a worn
out or mechanically damaged membrane. This demonstrates the
potential of AFD, not only for fault detection, but also
for fault diagnosis. Interestingly, the progression of the
sensor bias due to organic biofilm fouling differed
depending on the measurement technique used within the
DO-sensor. This is new knowledge that is valuable for
process control and should be further studied. The AFD was
implemented on a full scale system to demonstrate its
applicability, which is rarely done in research papers in
the field of WRRFs. }
}
@PhDThesis{ itlic:2017-002,
author = {Germ{\'a}n Ceballos},
title = {Modeling the Interactions Between Tasks and the Memory
System},
school = it,
department = docs,
year = 2017,
number = {2017-002},
type = {Licentiate thesis},
month = oct,
day = 11,
abstract = {Making computer systems more energy efficient while
obtaining the maximum performance possible is key for
future developments in engineering, medicine,
entertainment, etc. However it has become a difficult task
due to the increasing complexity of hardware and software,
and their interactions. For example, developers have to
deal with deep, multi-level cache hierarchies on modern
CPUs, and keep busy thousands of cores in GPUs, which makes
the programming process more difficult.
To simplify this task, new abstractions and programming
models are becoming popular. Their goal is to make
applications more scalable and efficient, while still
providing the flexibility and portability of old, widely
adopted models. One example of this is \emph{task-based
programming}, where simple independent tasks (functions)
are delegated to a runtime system which orchestrates their
execution. This approach has been successful because the
runtime can automatically distribute work across hardware
cores and has the potential to minimize data movement and
placement (e.g., being aware of the cache hierarchy).
To build better runtime systems, it is crucial to
understand bottlenecks in the performance of current and
future multicore systems. In this thesis, we provide fast,
accurate and mathematically-sound models and techniques to
understand the execution of task-based applications
concerning three key aspects: \emph{memory behavior} (data
locality), \emph{scheduling}, and \emph{performance}. With
these methods, we lay the groundwork for improving runtime
systems, providing insight into the interplay between the
schedule's behavior, data reuse through the cache
hierarchy, and the resulting performance.}
}
@PhDThesis{ itlic:2017-001,
author = {Diana Yamalova},
title = {Hybrid Observers for Systems with Intrinsic
Pulse-Modulated Feedback},
school = it,
department = syscon,
year = 2017,
number = {2017-001},
type = {Licentiate thesis},
month = mar,
day = 3,
abstract = {This licentiate thesis deals with a special class of
hybrid systems, where the continuous linear part is
controlled by an intrinsic impulsive feedback that
contributes discrete dynamics. The impacting pulsatile
feedback signal is not available for measurement and,
therefore, has to be reconstructed. To estimate all the
elements of the hybrid state vector, an observation problem
is considered.
The motivation for the research performed in this thesis
comes from mathematical modelling of pulsatile endocrine
regulation, where one of the hormones (a releasing hormone)
is secreted in pulses from neurons in the hypothalamus of
the brain. Thus a direct measurement of the concentration
of this hormone in the human is not possible for ethical
reasons and has to be estimated.
Several hybrid observer structures are proposed and
evaluated. The observer design is reduced to a problem of
synchronizing the impulsive sequence produced by the
observer with that of the plant. It utilizes a local
approach of assigning, through the output error feedback in
both the discrete and continuous parts of the plant model,
a guaranteed convergence rate to the local dynamics of a
synchronous mode. Performance of the proposed observer
schemes is analyzed by means of pointwise discrete
(Poincare) maps.
The first two papers of the thesis address the effects of
observer design degrees of freedom on the convergence of
the hybrid state estimation error. A generalization of the
proposed observation scheme to hybrid impulsive systems
with a time delay in continuous part of the plant is
investigated in Paper~III and Paper~IV. }
}
@PhDThesis{ itlic:2016-012,
author = {Peter Backeman},
title = {New Techniques for Handling Quantifiers in Boolean and
First-Order Logic},
school = it,
department = docs,
year = 2016,
number = {2016-012},
type = {Licentiate thesis},
month = dec,
day = 12,
abstract = {The automation of reasoning has been an aim of research
for a long time. Already in the 17th century, the famous
mathematician Leibniz invented a mechanical calculator
capable of performing all four basic arithmetic operations.
Although automatic reasoning can be done in different
fields, many of the procedures for automated reasoning
handle formulas of first-order logic. Examples of use
cases include hardware verification, program analysis and
knowledge representation.
One of the fundamental challenges in first-order logic is
handling quantifiers and the equality predicate. On the one
hand, SMT-solvers (Satisfiability Modulo Theories) are
quite efficient at dealing with theory reasoning, on the
other hand they have limited support for complete and
efficient reasoning with quantifiers. Sequent, tableau and
resolution calculi are methods which are used to construct
proofs for first-order formulas and can use more efficient
techniques to handle quantifiers. Unfortunately, in
contrast to SMT, handling theories is more difficult.
In this thesis we investigate methods to handle quantifiers
by restricting search spaces to finite domains which can be
explored in a systematic manner. We present this approach
in two different contexts.
First we introduce a function synthesis based on
template-based quantifier elimination, which is applied to
gene interaction computation. The function synthesis is
shown to be capable of generating smaller representations
of solutions than previous solvers, and by restricting the
constructed functions to certain forms we can produce
formulas which can more easily be interpreted by a
biologist.
Secondly we introduce the concept of Bounded Rigid
\emph{E}-Unification (BREU), a finite form of unification
that can be used to define a complete and sound sequent
calculus for first-order logic with equality. We show how
to solve this bounded form of unification in an efficient
manner, yielding a first-order theorem prover utilizing
BREU that is competitive with other state-of-the-art
tableau theorem provers. }
}
@PhDThesis{ itlic:2016-011,
author = {Andreas Svensson},
title = {Learning Probabilistic Models of Dynamical Phenomena Using
Particle Filters},
school = it,
department = syscon,
year = 2016,
number = {2016-011},
type = {Licentiate thesis},
month = dec,
day = 16,
abstract = {Dynamical behavior can be seen in many real-life
phenomena, typically as a dependence over time. This thesis
studies and develops methods and probabilistic models for
statistical learning of such dynamical phenomena.
A probabilistic model is a mathematical model expressed
using probability theory. Statistical learning amounts to
constructing such models, as well as adjusting them to data
recorded from real-life phenomena. The resulting models can
be used for, e.g., drawing conclusions about the phenomena
under study and making predictions.
The methods in this thesis are primarily based on the
particle filter and its generalizations, sequential Monte
Carlo (SMC) and particle Markov chain Monte Carlo (PMCMC).
The model classes considered are nonlinear state-space
models and Gaussian processes.
The following contributions are included. Starting with a
Gaussian-process state-space model, a general, flexible and
computationally feasible nonlinear state-space model is
derived in Paper I. In Paper II, a benchmark is performed
between the two alternative state-of-the-art methods SMCs
and PMCMC. Paper III considers PMCMC for solving the
state-space smoothing problem, in particular for an indoor
positioning application. In Paper IV, SMC is used for
marginalizing the hyperparameters in the Gaussian-process
state-space model, and Paper V is concerned with learning
of jump Markov linear state-space models. In addition, the
thesis also contains an introductory overview covering
statistical inference, state-space models, Gaussian
processes and some advanced Monte Carlo methods, as well as
two appendices summarizing some useful technical results.}
}
@PhDThesis{ itlic:2016-010,
author = {Aleksandar Zelji{\'c}},
title = {Approximations and Abstractions for Reasoning about
Machine Arithmetic},
school = it,
department = docs,
year = 2016,
number = {2016-010},
type = {Licentiate thesis},
month = oct,
day = 14,
abstract = {Safety-critical systems rely on various forms of machine
arithmetic to perform their tasks: integer arithmetic,
fixed-point arithmetic or floating-point arithmetic. The
problem with machine arithmetic is that it can exhibit
subtle differences in behavior compared to the ideal
mathematical arithmetic, due to fixed-size representation
in memory. Failure of safety-critical systems is
unacceptable, because it can cost lives or huge amounts of
money, time and effort. To prevent such incidents, we want
to formally prove that systems satisfy certain safety
properties, or otherwise discover cases when the properties
are violated. However, for this we need to be able to
formally reason about machine arithmetic. The main problem
with existing approaches is their inability to scale well
with the increasing complexity of systems and their
properties. In this thesis, we explore two alternatives to
bit-blasting, the core procedure lying behind many common
approaches to reasoning about machine arithmetic.
In the first approach, we present a general approximation
framework which we apply to solve constraints over
floating-point arithmetic. It is built on top of an
existing decision procedure, e.g., bit-blasting. Rather
than solving the original formula, we solve a sequence of
approximations of the formula. Initially very crude, these
approximations are frequently solved very quickly. We use
results from these approximations to either obtain a
solution, obtain a proof of unsatisfiability or generate a
new approximation to solve. Eventually, we will either have
found a solution or a proof that a solution does not exist.
The approximation framework improves the solving time and
can solve a number of formulas that bit-blasting cannot.
In the second approach, we present a novel method to reason
about the theory of fixed-width bit-vectors. This new
decision procedure is called \textsf{mcBV} and it is based
on the model constructing satisfiability
calculus~(\textsf{mcSAT}). The procedure uses a lazy
representation of bit-vectors and attempts to avoid
bit-blasting altogether. It is able to reason about
bit-vectors on both bit- and word-level, leveraging both
Boolean constraint propagation and native arithmetic
reasoning. It also features a greedy explanation
generalization mechanism and is capable of more general
learning compared to existing approaches. \textsf{mcBV} is
able to reason about bit-vectors with sizes that
significantly exceed the usual 32, 64 and 128 bits.
Evaluation of \textsf{mcBV} shows an improvement in
performance (compared to bit-blasting) on several classes
of problems.}
}
@PhDThesis{ itlic:2016-009,
author = {Timofey Mukha},
title = {Inflow Generation for Scale-Resolving Simulations of
Turbulent Boundary Layers},
school = it,
department = tdb,
year = 2016,
number = {2016-009},
type = {Licentiate thesis},
month = sep,
day = 30,
abstract = {Generating inflow fields for scale-resolving simulations
of turbulent flow is crucial for a wide range of
applications and is an active area of research. In this
thesis, a method for inflow generation employing a
precursor turbulent channel flow simulation is proposed. A
procedure for determining the parameters of the precursor
simulation based on the properties of the inflow is given.
To evaluate the performance of the method, results from a
simulation of a flat-plate zero-pressure-gradient turbulent
boundary layer are analysed. The adaption length is
quantified in terms of the development of integral
quantities and the statistical moments of the velocity
field. The performance is also compared with that of a
state-of-the-art rescaling method for the generation of
inflow data. It is shown that both approaches result in
adaption lengths of comparable sizes, which makes the
proposed method an attractive alternative due to its
conceptual simplicity and robustness.
As part of the work on inflow generation, a Python package,
\texttt{eddylicious}, was developed. The purpose of the
package is to be a framework within which various
generation methods can be implemented. The package is
available online under an open-source license. An overview
of the architecture and currently implemented functionality
of the package is given in this thesis.
Furthermore, the results of a preparatory study on
large-eddy simulation of wall-bounded turbulent flows are
discussed. Fully-developed turbulent channel flow is used
as a model problem, and the general-purpose computational
fluid dynamics solver \texttt{OpenFOAM} is employed. The
accuracy of the results with respect to the resolution of
the computational mesh is analysed. Several modelling
approaches for the subgrid scale stresses are considered.}
}
@PhDThesis{ itlic:2016-008,
author = {Simon Sticko},
title = {Towards Higher Order Immersed Finite Elements for the Wave
Equation},
school = it,
department = tdb,
year = 2016,
number = {2016-008},
type = {Licentiate thesis},
month = sep,
day = 16,
abstract = {We consider solving the scalar wave equation using
immersed finite elements. Such a method might be useful,
for instance, in scattering problems when the geometry of
the domain is not known a priori. For hyperbolic problems,
the amount of computational work per dispersion error is
generally lower when using higher order methods. This
serves as motivation for considering a higher order
immersed method.
One problem in immersed methods is how to enforce boundary
conditions. In the present work, boundary conditions are
enforced weakly using Nitsche's method. This leads to a
symmetric weak formulation, which is essential when solving
the wave equation. Since the discrete system consists of
symmetric matrices, having real eigenvalues, this ensures
stability of the semi-discrete problem.
In immersed methods, small intersections between the
immersed domain and the elements of the background mesh
make the system ill-conditioned. This ill-conditioning
becomes increasingly worse when using higher order
elements. Here, we consider resolving this issue using
additional stabilization terms. These terms consist of
jumps in higher order derivatives acting on the internal
faces of the elements intersected by the boundary.}
}
@PhDThesis{ itlic:2016-007,
author = {Volkan Cambazoglou},
title = {Protocol, Mobility and Adversary Models for the
Verification of Security},
school = it,
department = docs,
year = 2016,
number = {2016-007},
type = {Licentiate thesis},
month = sep,
day = 19,
abstract = {The increasing heterogeneity of communicating devices,
ranging from resource constrained battery driven sensor
nodes to multi-core processor computers, challenges
protocol design. We examine security and privacy protocols
with respect to exterior factors such as users,
adversaries, and computing and communication resources; and
also interior factors such as the operations, the
interactions and the parameters of a protocol.
Users and adversaries interact with security and privacy
protocols, and even affect the outcome of the protocols. We
propose user mobility and adversary models to examine how
the location privacy of users is affected when they move
relative to each other in specific patterns while
adversaries with varying strengths try to identify the
users based on their historical locations. The location
privacy of the users is simulated with the support of the
K-Anonymity protection mechanism, the Distortion-based
metric, and our models of users' mobility patterns and
adversaries' knowledge about users.
Security and privacy protocols need to operate on various
computing and communication resources. Some of these
protocols can be adjusted for different situations by
changing parameters. A common example is to use longer
secret keys in encryption for stronger security. We
experiment with the trade-off between the security and the
performance of the Fiat-Shamir identification protocol. We
pipeline the protocol to increase its utilisation as the
communication delay outweighs the computation.
A mathematical specification based on a formal method leads
to a strong proof of security. We use three formal
languages with their tool supports in order to model and
verify the Secure Hierarchical In-Network Aggregation
(SHIA) protocol for Wireless Sensor Networks (WSNs). The
three formal languages specialise on cryptographic
operations, distributed systems and mobile processes.
Finding an appropriate level of abstraction to represent
the essential features of the protocol in three formal
languages was central.}
}
@PhDThesis{ itlic:2016-006,
author = {Anton Axelsson},
title = {Context: The Abstract Term for the Concrete},
school = it,
department = vi2,
year = 2016,
number = {2016-006},
type = {Licentiate thesis},
month = may,
day = 18,
abstract = {This thesis deals with the term `context' and the aim has
been to reason about the term in order to see whether it is
possible to reach a satisfactory understanding of the
concept. But the thesis is also a journey into human
reasoning and conveys a certain view of human cognition. It
aims to synthesise results of studies within psychology,
cognitive science, anthropology, and human-computer
interaction. My understanding is that context is not
something we are a part of, but rather something we create
mentally in relation to a specific goal. Determination of
something ambiguous thus comes from top-down processes
related to a goal. I believe context has been wrongly
interpreted in HCI as that which a user is situated
\textit{in} and which a product is being used \textit{in}.
I suggest instead a separation between the user environment
and the user context.}
}
@PhDThesis{ itlic:2016-005,
author = {Ida Bodin},
title = {Cognitive Work Analysis in Practice: Adaptation to Project
Scope and Industrial Context},
school = it,
department = vi2,
year = 2016,
number = {2016-005},
type = {Licentiate thesis},
month = mar,
day = 23,
abstract = {The Cognitive Work Analysis (CWA) framework is widely used
by researchers for the analysis of complex systems. It,
however, lacks the same impact amongst industrial
practitioners. This thesis investigates possible
adaptations of the framework to project and industrial
constraints, and the consequences associated with such
adaptations. Long haul heavy vehicle transportation is the
application domain for the work presented in the thesis.
The CWA framework has been applied within the Methods for
Designing Future Autonomous Systems (MODAS) project.
Adaptations have been made to fit the framework within the
project constraints and the industrial contexts. Interviews
with stakeholders in an industrial organization regarding
possible use of models from the CWA framework have been
made. The CWA was scaled to available resources when
applied within the MODAS project. From this we realized
that prioritization of work activity can have consequences
on the resulting system's ability to handle unforeseen
events. Further, a focus on the current system probed a
rapid out-dating of the analysis due to technical
development. The conclusion is that even if advantages are
lost during adaptation due to practical constraints, the
CWA framework could add value to practitioners within
industry if adapted to the industrial context.}
}
@PhDThesis{ itlic:2016-004,
author = {Kasun Hewage},
title = {Towards a Secure Synchronous Communication Architecture
for Low-power Wireless Networks},
school = it,
department = docs,
year = 2016,
number = {2016-004},
type = {Licentiate thesis},
month = feb,
day = 2,
abstract = {The Internet of Things (IoT) is becoming the future
Internet where most day-to-day devices are connected to the
Internet. These devices are often resource constrained and
use low-power wireless communication. Hence networks of
them are called Low-power and lossy networks (LLNs). LLN
devices may be used in critical applications such as health
care, traffic and industrial plants that concern privacy
and security, thus their communication has to be protected
from malicious activities. LLNs face threats at different
levels ranging from transmitting bits wirelessly to
applications.
In this thesis, we primarily explore LLN security issues
related to application protocols and attacks that target
the availability of LLNs. Particularly, we investigate
compressing messages of a transport security protocol,
DTLS, to make it efficient for LLNs. The IETF proposes to
use DTLS for securing CoAP, a specialized web protocol for
constrained devices. Furthermore, we experimentally study
disrupting the communication of one of the state-of-the-art
LLN protocols, Glossy, by attacking its core mechanism.
Secondarily, we aim at improving the performance of TCP in
LLNs with mobility over a reliable data link protocol. To
this end, we use a Glossy-based communication protocol,
LWB, as a reliable data link protocol. We plan to use the
evaluation of this work as a stepping stone towards
comparing the performance of secure Glossy-based
communication protocols.
The main contributions of this thesis are threefold. We
propose novel message compression mechanisms for DTLS
messages. We also present novel attacks on Glossy, evaluate
the effectiveness of them experimentally, and propose
potential countermeasures. Finally, we show that a
reliable data link protocol can improve the performance of
TCP in static and mobile settings.}
}
@PhDThesis{ itlic:2016-003,
author = {Sven-Erik Ekstr{\"o}m},
title = {A Vertex-Centered Discontinuous {G}alerkin Method for Flow
Problems},
school = it,
department = tdb,
year = 2016,
number = {2016-003},
type = {Licentiate thesis},
month = feb,
day = 25,
abstract = {The understanding of flow problems, and finding their
solution, has been important for most of human history,
from the design of aqueducts to boats and airplanes. The
use of physical miniature models and wind tunnels were, and
still are, useful tools for design, but with the
development of computers, an increasingly large part of the
design process is assisted by computational fluid dynamics
(CFD).
Many industrial CFD codes have their origins in the 1980s
and 1990s, when the low order finite volume method (FVM)
was prevalent. Discontinuous Galerkin methods (DGM) have,
since the turn of the century, been seen as the successor
of these methods, since it is potentially of arbitrarily
high order. In its lowest order form DGM is equivalent to
FVM. However, many existing codes are not compatible with
standard DGM and would need a complete rewrite to obtain
the advantages of the higher order.
This thesis shows how to extend existing vertex-centered
and edge-based FVM codes to higher order, using a special
kind of DGM discretization, which is different from the
standard cell-centered type. Two model problems are
examined to show the necessary data structures that need to
be constructed, the order of accuracy for the method, and
the use of an $hp$-adaptation scheme to resolve a
developing shock. Then the method is further developed to
solve the steady Euler equations, within the existing
industrial \textsf{Edge} code, using acceleration
techniques such as local time stepping and multigrid.
With the ever increasing need for more efficient and
accurate solvers and algorithms in CFD, the modified DGM
presented in this thesis could be used to help and
accelerate the adoption of high order methods in industry.}
}
@PhDThesis{ itlic:2016-002,
author = {Rub{\'e}n Cubo},
title = {Mathematical Modeling for Optimization of Deep Brain
Stimulation},
school = it,
department = syscon,
year = 2016,
number = {2016-002},
type = {Licentiate thesis},
month = jan,
day = 29,
abstract = {Deep Brain Stimulation (DBS) consists of sending mild
electric stimuli to the brain via a chronically implanted
lead. The therapy is used to alleviate the symptoms of
different neurological diseases, such as Parkinson's
Disease. However, its underlying biological mechanism is
currently unknown. DBS patients undergo a lengthy
trial-and-error procedure in order to tune the stimuli so
that the treatment achieves maximal therapeutic benefits
while limiting side effects that are often present with
large stimulation values.
The present licentiate thesis deals with mathematical
modeling for DBS, extending it towards optimization.
Mathematical modeling is motivated by the difficulty of
obtaining in vivo measurements from the brain, especially
in humans. It is expected to facilitate the optimization of
the stimuli delivered to the brain and be instrumental in
evaluating the performance of novel lead designs. Both
topics are discussed in this thesis.
First, an analysis of numerical accuracy is presented in
order to verify the DBS models utilized in this study. Then
a performance comparison between a state-of-the-art lead
and a novel field-steering lead using clinical settings is
provided. Afterwards, optimization schemes using
intersection of volumes and electric field control are
described, together with some simplification tools, in
order to speed up the computations involved in the
modeling. }
}
@PhDThesis{ itlic:2016-001,
author = {Victor Shcherbakov},
title = {Radial Basis Function Methods for Pricing Multi-Asset
Options},
school = it,
department = tdb,
year = 2016,
number = {2016-001},
type = {Licentiate thesis},
month = jan,
day = 22,
abstract = {The price of an option can under some assumptions be
determined by the solution of the Black--Scholes partial
differential equation. Often options are issued on more
than one asset. In this case it turns out that the option
price is governed by the multi-dimensional version of the
Black--Scholes equation. Options issued on a large number
of underlying assets, such as index options, are of
particular interest, but pricing such options is a
challenge due to the ``curse of dimensionality''. The
multi-dimensional PDE turns out to be computationally
expensive to solve accurately even in quite a low number of
dimensions.
In this thesis we develop a radial basis function partition
of unity method for pricing multi-asset options up to
moderately high dimensions. Our approach requires the use
of a lower number of node points per dimension than other
standard PDE methods, such as finite differences or finite
elements, thanks to a high order convergence rate. Our
method shows good results for both European style options
and American style options, which allow early exercise. For
the options which do not allow early exercise, the method
exhibits an exponential convergence rate under node
refinement. For options that allow early exercise the
option pricing problem becomes a free boundary problem. We
incorporate two different approaches for handling the free
boundary into the radial basis function partition of unity
method: a penalty method, which leads to a nonlinear
problem, and an operator splitting method, which leads to a
splitting scheme. We show that both methods allow for
locally high algebraic convergence rates, but it turns out
that the operator splitting method is computationally more
efficient than the penalty method. The main reason is that
there is no need to solve a nonlinear problem, which is the
case in the penalty formulation. }
}
@PhDThesis{ itlic:2015-006,
author = {Hanna Holmgren},
title = {Towards Accurate Modeling of Moving Contact Lines},
school = it,
department = tdb,
year = 2015,
number = {2015-006},
type = {Licentiate thesis},
month = nov,
day = 12,
abstract = {The present thesis treats the numerical simulation of
immiscible incompressible two-phase flows with moving
contact lines. The conventional Navier--Stokes equations
combined with a no-slip boundary condition leads to a
non-integrable stress singularity at the contact line. The
singularity in the model can be avoided by allowing the
contact line to slip. Implementing slip conditions in an
accurate way is not straight-forward and different
regularization techniques exist where ad-hoc procedures are
common. This thesis presents the first steps in developing
the macroscopic part of an accurate multiscale model for a
moving contact line problem in two space dimensions. It is
assumed that a micro model has been used to determine a
relation between the contact angle and the contact line
velocity. An intermediate region is introduced where an
analytical expression for the velocity field exists,
assuming the solid wall is perfectly flat. This expression
is used to implement boundary conditions for the moving
contact line, at the macroscopic scale, along a fictitious
boundary located a small distance away from the physical
boundary. Model problems where the shape of the interface
is constant throughout the simulation are introduced. For
these problems, experiments show that the errors in the
resulting contact line velocities converge with the grid
size $h$ at a rate of convergence $p \approx 2$. Further,
an analytical expression for the velocity field in the
intermediate region for the case with a curved solid wall
is derived. The derivation is based on perturbation
analysis. }
}
@PhDThesis{ itlic:2015-005,
author = {Siyang Wang},
title = {Analysis of Boundary and Interface Closures for Finite
Difference Methods for the Wave Equation},
school = it,
department = tdb,
year = 2015,
number = {2015-005},
type = {Licentiate thesis},
month = oct,
day = 27,
abstract = {We consider high order finite difference methods for the
wave equations in the second order form, where the finite
difference operators satisfy the summation-by-parts
principle. Boundary conditions and interface conditions are
imposed weakly by the simultaneous-approximation-term
method, and non-conforming grid interfaces are handled by
an interface operator that is based on either interpolating
directly between the grids or on projecting to piecewise
continuous polynomials on an intermediate grid.
Stability and accuracy are two important aspects of a
numerical method. For accuracy, we prove the convergence
rate of the summation-by-parts finite difference schemes
for the wave equation. Our approach is based on Laplace
transforming the error equation in time, and analyzing the
solution to the boundary system in the Laplace space. In
contrast to first order equations, we have found that the
determinant condition for the second order equation is less
often satisfied for a stable numerical scheme. If the
determinant condition is satisfied uniformly in the right
half plane, two orders are recovered from the boundary
truncation error; otherwise we perform a detailed analysis
of the solution to the boundary system in the Laplace space
to obtain an error estimate. Numerical experiments
demonstrate that our analysis gives a sharp error estimate.
For stability, we study the numerical treatment of
non-conforming grid interfaces. In particular, we have
explored two interface operators: the interpolation
operators and projection operators applied to the wave
equation. A norm-compatible condition involving the
interface operator and the norm related to the SBP operator
is essential to prove stability by the energy method for
first order equations. In the analysis, we have found that
in contrast to first order equations, besides the
norm-compatibility condition an extra condition must be
imposed on the interface operators to prove stability by
the energy method. Furthermore, accuracy and efficiency
studies are carried out for the numerical schemes. }
}
@PhDThesis{ itlic:2015-004,
author = {Pavol Bauer},
title = {Parallelism and Efficiency in Discrete-Event Simulation},
school = it,
department = tdb,
year = 2015,
number = {2015-004},
type = {Licentiate thesis},
month = nov,
day = 5,
note = {PDF updated 2015-10-27 to include the papers.},
abstract = {Discrete-event models depict systems where a discrete
state is repeatedly altered by instantaneous changes in
time, the events of the model. Such models have gained
popularity in fields such as Computational Systems Biology
or Computational Epidemiology due to the high modeling
flexibility and the possibility to easily combine
stochastic and deterministic dynamics. However, the system
size of modern discrete-event models is growing and/or they
need to be simulated at long time periods. Thus, efficient
simulation algorithms are required, as well as the
possibility to harness the compute potential of modern
multicore computers. Due to the sequential design of
simulators, parallelization of discrete event simulations
is not trivial. This thesis discusses event-based modeling
and sensitivity analysis and also examines ways to increase
the efficiency of discrete-event simulations and to scale
models involving deterministic and stochastic spatial
dynamics on a large number of processor cores.}
}
@PhDThesis{ itlic:2015-003,
author = {Fredrik Hellman},
title = {Multiscale and Multilevel Methods for Porous Media Flow
Problems},
school = it,
department = tdb,
year = 2015,
number = {2015-003},
type = {Licentiate thesis},
month = sep,
day = 25,
abstract = {We consider two problems encountered in simulation of
fluid flow through porous media. In macroscopic models
based on Darcy's law, the permeability field appears as
data.
The first problem is that the permeability field generally
is not entirely known. We consider forward propagation of
uncertainty from the permeability field to a quantity of
interest. We focus on computing $p$-quantiles and failure
probabilities of the quantity of interest. We propose and
analyze improved standard and multilevel Monte Carlo
methods that use computable error bounds for the quantity
of interest. We show that substantial reductions in
computational costs are possible by the proposed approaches.
The second problem is fine scale variations of the
permeability field. The permeability often varies on a
scale much smaller than that of the computational domain.
For standard discretization methods, these fine scale
variations need to be resolved by the mesh for the methods
to yield accurate solutions. We analyze and prove
convergence of a multiscale method based on the
Raviart--Thomas finite element. In this approach, a
low-dimensional multiscale space based on a coarse mesh is
constructed from a set of independent fine scale patch
problems. The low-dimensional space can be used to yield
accurate solutions without resolving the fine scale.}
}
@PhDThesis{ itlic:2015-002,
author = {Ali Dorostkar},
title = {Developments in preconditioned iterative methods with
application to glacial isostatic adjustment models},
school = it,
department = tdb,
year = 2015,
number = {2015-002},
type = {Licentiate thesis},
month = may,
day = 29,
abstract = {This study examines the block lower-triangular
preconditioner with element-wise Schur complement as the
lower diagonal block applied on matrices arising from an
application in geophysics. The element-wise Schur
complement is a special approximation of the exact Schur
complement that can be constructed in the finite element
framework. The preconditioner, the exact Schur complement
and the element-wise Schur complement are analyzed
mathematically and experimentally.
The preconditioner is developed specifically for the
glacial isostatic adjustment (GIA) model in its simplified
flat Earth variant, but it is applicable to linear system
of equations with matrices of saddle point form.
In this work we investigate the quality of the element-wise
Schur complement for symmetric indefinite matrices with
positive definite pivot block and show spectral bounds that
are independent of the problem size. For non-symmetric
matrices we use generalized locally Toeplitz (GLT)
sequences to construct a function that asymptotically
describes the spectrum of the involved matrices.
The theoretical results are verified by numerical
experiments for the GIA model. The results show that the
so-obtained preconditioned iterative method converges to
the solution in constant number of iterations regardless of
the problem size or parameters.}
}
@PhDThesis{ itlic:2015-001,
author = {Karl Ljungkvist},
title = {Techniques for Finite Element Methods on Modern
Processors},
school = it,
department = tdb,
year = 2015,
number = {2015-001},
type = {Licentiate thesis},
month = jan,
day = 23,
abstract = {In this thesis, methods for efficient utilization of
modern computer hardware for numerical simulation are
considered. In particular, we study techniques for speeding
up the execution of finite-element methods.
One of the greatest challenges in finite-element
computation is how to efficiently perform the system
matrix assembly in parallel, due to its
complicated memory access pattern. The main difficulty lies
in the fact that many entries of the matrix are being
updated concurrently by several parallel threads. We
consider transactional memory, an exotic hardware feature
for concurrent update of shared variables, and conduct
benchmarks on a prototype processor supporting it. Our
experiments show that transactions can both simplify
programming and provide good performance for concurrent
updates of floating point data.
Furthermore, we study a matrix-free approach to
finite-element computation which avoids the matrix
assembly. Motivated by its computational properties, we
implement the matrix-free method for execution on graphics
processors, using either atomic updates or a mesh coloring
approach to handle the concurrent updates. A performance
study shows that on the GPU, the matrix-free method is
faster than a matrix-based implementation for many element
types, and allows for solution of considerably larger
problems. This suggests that the matrix-free method can
speed up execution of large realistic simulations.}
}
@PhDThesis{ itlic:2014-007,
author = {Ram{\=u}nas Gutkovas},
title = {Advancing Concurrent System Verification: Type based
approach and tools},
school = it,
department = csd,
year = 2014,
number = {2014-007},
type = {Licentiate thesis},
month = oct,
day = 20,
abstract = {Concurrent systems, i.e., systems of parallel processes,
are nearly ubiquitous and verifying the correctness of such
systems is becoming an important subject. Many formalisms
were invented for such purpose, however, new types of
systems are introduced and there is a need for handling
larger systems. One example is wireless sensor networks
that are being deployed in increasing numbers in various
areas; and in particular safety-critical areas, e.g., bush
fire detection. Thus, ensuring their correctness is
important.
A process calculus is a formal language for modeling
concurrent systems. The pi-calculus is a prominent example
of such a language featuring message-passing concurrency.
Psi-calculi is a parametric framework that extends the
pi-calculus with arbitrary data and logics. Psi-calculi
feature a universal theory with its results checked in an
automated theorem prover ensuring their correctness.
In this thesis, we extend psi-calculi expressiveness and
modeling precision by introducing a sort system and
generalised pattern matching. We show that the extended
psi-calculi enjoy the same meta-theoretical results.
We have developed the Pwb, a tool for the psi-calculi
framework. The tool provides a high-level interactive
symbolic execution and automated behavioral equivalence
checking. We exemplify the use of the tool by developing a
high-level executable model of a data collection protocol
for wireless sensor networks.
We are the first to introduce a session types based system
for systems with unreliable communication. Remarkably, we
do not need to add specific extensions to the types to
accommodate such systems. We prove the standard desirable
properties for type systems hold also for our type
system.}
}
@PhDThesis{ itlic:2014-006,
author = {Per Mattsson},
title = {Pulse-modulated Feedback in Mathematical Modeling and
Estimation of Endocrine Systems},
school = it,
department = syscon,
year = 2014,
number = {2014-006},
type = {Licentiate thesis},
month = sep,
day = 9,
abstract = {The research field of systems biology has gained a lot of
interest during the last decades. Systems biology can be
seen as the systematic study of complex interactions in
biological systems, mainly by methods from dynamical
systems theory. This thesis mainly deals with the
testosterone (Te) regulation in the human male, but
techniques developed here might be useful for studying
other parts of the endocrine system too. The contribution
of the thesis can be divided into two parts: one covering
mathematical models of Te regulation and another suggesting
and validating identification techniques for those models.
Regarding modeling, existing models of testosterone
regulation have been extended with time delays and
nonlinear dynamics, with the purpose of achieving better
fit to clinical data. The identification part treats the
estimation of unknown model parameters, and estimation of
signals that can not be measured in a non-invasive way.}
}
@PhDThesis{ itlic:2014-005,
author = {Thomas Lind},
title = {Change and Resistance to Change in Health Care: Inertia in
Sociotechnical Systems},
school = it,
department = vi2,
year = 2014,
number = {2014-005},
type = {Licentiate thesis},
month = jun,
day = 13,
abstract = {This thesis explores change and resistance to change of IT
systems in organisations from a sociotechnical perspective.
The work is drawing on empirical data gathered during two
Action Research projects in Swedish Health Care: one
regarding the deployment of electronic patient record
systems within health care organisations, and the other
regarding the deployment of eHealth services geared towards
patients and citizens. Resistance to change is classified
as an indicator of social inertia, and the concept of
counter-implementation, comprising three general strategies
to obstruct change initiatives, is used to highlight the
political aspects of social inertia. For the analysis, the
concept of social inertia is used as a point of departure
towards inertia in sociotechnical systems by applying
values and principles from sociotechnical systems research,
most prominently the interdependence-characteristic. This
extended concept is used to show and discuss how IT systems
can either enforce change or be a source of inertia
preventing change in organisations, and such planned or
inadvertent effects of implementing IT systems are
discussed as a significant source of user resistance.}
}
@PhDThesis{ itlic:2014-004,
author = {Anne-Kathrin Peters},
title = {The Role of Students' Identity Development in Higher
Education in Computing},
school = it,
department = docs,
year = 2014,
number = {2014-004},
type = {Licentiate thesis},
month = apr,
day = 25,
abstract = {Higher Education Research in science, technology,
engineering, and mathematics (STEM) indicates that students
are not well supported in the process of integrating their
educational experience with their perception of who they
are and want to become. This is associated with drop-out
and also has consequences for student learning. Here,
learning is defined in the broad sense of personal and
professional development.
This thesis presents results from a research project that
explores students' identity development during their first
three years of studies. The analysis and results build on
interview and essay data collected during a longitudinal
study of students in two study programmes at Uppsala
University, Computer and Information Engineering (IT) and
Computer Science (CS). The main body of data analysed for
this thesis was collected from the students at the
beginning and end of their first study year.
A research framework to study identity has been developed.
The notion of identity used in this work has been inspired
by Lave and Wenger's social theory of learning, and theory
of situated learning. Identity, in this work, refers to
students' histories of experiences with a focus on how they
negotiate meaning within the discipline of CS/IT.
The results describe aspects of CS/IT students' identities
and provide a basis from which to discuss the implications
of identity for learning and education, as well as to
reason about how identity development can be supported in
CS/IT education.}
}
@PhDThesis{ itlic:2014-003,
author = {Liang Dai},
title = {On Several Sparsity Related Problems and the Randomized
{K}aczmarz Algorithm},
school = it,
department = syscon,
year = 2014,
number = {2014-003},
type = {Licentiate thesis},
month = apr,
day = 9,
abstract = {This thesis studies several problems related to recovery
and estimation. Specifically, these problems are about
sparsity and low-rankness, and the randomized Kaczmarz
algorithm. This thesis includes four papers referred to as
Paper A, Paper B, Paper C, and Paper D.
Paper A considers how to make use of the fact that the
solution to an overdetermined system is sparse. This paper
presents a three-stage approach to accomplish the task. We
show that this strategy, under the assumptions as made in
the paper, achieves the oracle property.
In Paper B, a Hankel-matrix completion problem arising in
system theory is studied. The use of the nuclear norm
heuristic for this basic problem is considered. Theoretical
justification for the case of a single real pole is given.
Results show that for the case of a single real pole, the
nuclear norm heuristic succeeds in the matrix completion
task. Numerical simulations indicate that this result does
not always carry over to the case of two real poles.
Paper C discusses a screening approach for improving the
computational performance of the Basis Pursuit De-Noising
problem. The key ingredient for this work is to make use of
an efficient ellipsoid update algorithm. The results of the
experiments show that the proposed scheme can improve the
overall time complexity for solving the problem.
Paper D studies the choice of the probability distribution
for implementing the row-projections in the randomized
Kaczmarz algorithm. This relates to an open question in the
recent literature. The result proves that a probability
distribution resulting in a faster convergence of the
algorithm can be found by solving a related Semi-Definite
Programming optimization problem.}
}
@PhDThesis{ itlic:2014-002,
author = {Johannes Nygren},
title = {Output Feedback Control - Some Methods and Applications},
school = it,
department = syscon,
year = 2014,
number = {2014-002},
type = {Licentiate thesis},
month = mar,
day = 27,
abstract = {This thesis studies some output feedback control laws.
Particularly, iterative learning control (ILC) and
decentralized network based algorithms are studied.
Applications to control of wastewater treatment plants are
outlined. For a linear, discrete time MIMO plant, it is
shown that the size of the global controller gain, also
referred to as the diffusion matrix, plays an important
role in stabilization of a decentralized control system
with possibly non-linear output feedback. Based on
information from a step response experiment of the open
loop system, a controller gain which is sufficient for
stability can be found. For the SISO case, frequency
response expressions are derived for the choice of this
controller gain. The results relate nicely to notions of
optimality and the Nyquist stability criterion. Various
types of ILC algorithms are analysed and numerically
illustrated. In particular, new expressions of the
asymptotic control error variance for adjoint based
iterative learning control (ILC) are derived. It is proven
that the control error variance converges to its minimum if
a decreasing learning gain matrix is used for ILC. In a
simulation study ILC is applied to control a sequencing
batch reactor. It is shown numerically that an adjoint
based ILC outperforms inverse based ILC and model-free,
proportional ILC. A merge of an activated sludge process
simulator and a simulator for a wireless sensor network is
described and used for illustrating some control
performance. Finally, in a numerical optimization study it
is shown that the aeration energy can be decreased if many
dissolved oxygen sensors are used for aeration control in a
biological reactor for nitrogen removal. These results may
support future use of inexpensive wireless sensor networks
for control of wastewater treatment plants. }
}
@PhDThesis{ itlic:2014-001,
author = {Daniel Jansson},
title = {Mathematical Modeling of the Human Smooth Pursuit System},
school = it,
department = syscon,
year = 2014,
number = {2014-001},
type = {Licentiate thesis},
month = jan,
day = 21,
abstract = {This licentiate thesis concerns mathematical modeling and
identification of the human smooth pursuit system (SPS)
and the application of the models to motor symptom
quantification in Parkinson's disease (PD).
The SPS is a complex neuromuscular system governing smooth
pursuit eye movements (SPEM), and the task is to keep a
moving target in the visual field.
Diagnosing and quantifying the disease is done by interview
and clinical observation which requires hours of
interaction between the patient and a qualified clinician.
Acquiring a better understanding of the SPS cast in
mathematical models may be a first step towards developing
a technology that allows for fast and automatic PD staging.
Lately, the increased performance and accessibility of eye
tracking technologies have generated a great deal of
interest in the commercial sector. This thesis presents an
effort towards developing more sophisticated data analysis
techniques in an attempt to extract previously hidden
information from the eye tracking data and to open up for
new more advanced applications.
The SPS relates gaze direction to visual stimuli and may
thus be viewed as a dynamical system with an input and an
output signal. This thesis considers various parametric and
non-parametric black- and grey-box models, both linear and
nonlinear, to portray the SPS. The models are evaluated to
characterize the SPS in different individuals and to look
for discrepancies between the SPS function of healthy
controls and Parkinson patients. It is shown that disease
does indeed impair the system and that the effects are
distinguishable from those of healthy aging.}
}
@PhDThesis{ itlic:2013-007,
author = {Hjalmar Wennerstr{\"o}m},
title = {Meteorological Impact and Transmission Errors in Outdoor
Wireless Sensor Networks},
school = it,
department = docs,
year = 2013,
number = {2013-007},
type = {Licentiate thesis},
month = dec,
day = 17,
abstract = {Wireless sensor networks have been deployed outdoors ever
since their inception. They have been used in areas such as
precision farming, tracking wildlife, and monitoring
glaciers. These diverse application areas all have
different requirements and constraints, shaping the way in
which the sensor network communicates. Yet something they
all share is the exposure to an outdoor environment, which
at times can be harsh, uncontrolled and difficult to
predict. Therefore, understanding the implications of an
outdoor environment is an essential step towards reliable
wireless sensor network operations.
In this thesis we consider aspects of how the environment
influences outdoor wireless sensor networks. Specifically,
we experimentally study how meteorological factors impact
radio links, and find that temperature is most significant.
This motivates us to further study and propose a first
order model describing the impact of temperature on
wireless sensor nodes. We also analyze transmission errors
in outdoor wireless sensor networks, identifying and
explaining patterns in the way data gets corrupted. The
findings lead to a design and evaluation of an approach for
probabilistic recovery of corrupt data in outdoor wireless
sensor networks. Apart from the experimental findings we
have conducted two different outdoor deployments for which
large data sets have been collected, containing both link
and meteorological measurements. }
}
@PhDThesis{ itlic:2013-006,
author = {Kristoffer Virta},
title = {Difference Methods with Boundary and Interface Treatment
for Wave Equations},
school = it,
department = tdb,
year = 2013,
number = {2013-006},
type = {Licentiate thesis},
month = oct,
day = 22,
abstract = {Wave motion in acoustic and elastic media is highly
influenced by the presence of outer boundaries and media
interfaces. The solutions to the equations governing the
wave motion at any point in the domain as a function of
time can be sought either through analytical or numerical
techniques.
This thesis proposes provably stable finite difference
schemes to accurately investigate wave interaction with
boundaries and interfaces. Schemes for the acoustic wave
equation in three spatial coordinates, general domains and
heterogeneous media and the elastic wave equation in two
spatial dimensions and layered media are presented. A study
of the Rayleigh surface wave in almost incompressible media
is carried through. Extensive numerical experiments
designed to verify stability and accuracy as well as
applicability to non-trivial boundary and interface
phenomena are given.}
}
@PhDThesis{ itlic:2013-005,
author = {Emil Kieri},
title = {Numerical Quantum Dynamics},
school = it,
department = tdb,
year = 2013,
number = {2013-005},
type = {Licentiate thesis},
month = oct,
day = 15,
note = {Included papers available at
\url{http://urn.kb.se/resolve?urn=urn:nbn:se:uu:diva-179058},
\url{http://www.it.uu.se/research/publications/reports/2013-019/},
\url{http://www.it.uu.se/research/publications/reports/2013-007/}.}
,
abstract = {We consider computational methods for simulating the
dynamics of molecular systems governed by the
time-dependent Schr{\"o}dinger equation. Solving the
Schr{\"o}dinger equation numerically poses a challenge due
to its often highly oscillatory solutions, and to the
exponential growth of work and memory with the number of
particles in the system.
Two different classes of problems are studied: the dynamics
of the nuclei in a molecule and the dynamics of an electron
in orbit around a nucleus. For the first class of problems
we present new computational methods which exploit the
relation between quantum and classical dynamics in order to
make the computations more efficient. For the second class
of problems, the lack of regularity in the solution poses a
computational challenge. Using knowledge of the non-smooth
features of the solution we construct a new method with two
orders higher accuracy than what is achieved by direct
application of a difference stencil.}
}
@PhDThesis{ itlic:2013-004,
author = {{\AA}man Pohjola, Johannes},
title = {Bells and Whistles: Advanced Language Features in
Psi-Calculi},
school = it,
department = csd,
year = 2013,
number = {2013-004},
type = {Licentiate thesis},
month = oct,
day = 4,
abstract = {Psi-calculi is a parametric framework for process calculi
similar to popular pi-calculus extensions such as the
explicit fusion calculus, the applied pi-calculus and the
spi calculus. Remarkably, machine-checked proofs of
standard algebraic and congruence properties of
bisimilarity apply to every instance of the framework.
The contribution of this licentiate thesis is to
significantly extend the applicability and expressiveness
of psi-calculi by incorporating several advanced language
features into the framework: broadcasts, higher-order
communication, generalised pattern matching, sorts and
priorities. The extensions present several interesting
technical challenges, such as negative premises. The
machine-checked proofs for standard results about
bisimilarity are generalised to each of these new settings,
and the proof scripts are freely available online. }
}
@PhDThesis{ itlic:2013-003,
author = {Daniel Elfverson},
title = {On Discontinuous Galerkin Multiscale Methods},
school = it,
department = tdb,
year = 2013,
number = {2013-003},
type = {Licentiate thesis},
month = jun,
day = 4,
abstract = {In this thesis a new multiscale method, the discontinuous
Galerkin multiscale method, is proposed. The method uses
localized fine scale computations to correct a global
coarse scale equation and thereby takes the fine scale
features into account. We show a priori error bounds for
convection dominated diffusion-convection-reaction problems
with variable coefficients. We also present a posteriori
error bound in the case of no convection or reaction and
present an adaptive algorithm which tunes the method
parameters automatically. We also present extensive
numerical experiments which verify our analytical
findings.}
}
@PhDThesis{ itlic:2013-002,
author = {Marcus Holm},
title = {Scientific Computing on Hybrid Architectures},
school = it,
department = tdb,
year = 2013,
number = {2013-002},
type = {Licentiate thesis},
month = may,
day = 31,
abstract = {Modern computer architectures, with multicore CPUs and
GPUs or other accelerators, make stronger demands than ever
on writers of scientific code. As a rule of thumb, the
fastest, most efficient program consists of
labor-intensive code written by expert programmers for a certain
application on a particular computer. This thesis deals
with several algorithmic and technical approaches towards
effectively satisfying the demand for high-performance
parallel programming without incurring such a high cost in
expert programmer time. Effective programming is
accomplished by writing performance-portable code where
performance-critical functionality is provided either by
external software or at least a balance between
maintainability/generality and efficiency.}
}
@PhDThesis{ itlic:2013-001,
author = {Olov Ros{\'e}n},
title = {Parallelization of Stochastic Estimation Algorithms on
Multicore Computational Platforms},
school = it,
department = syscon,
year = 2013,
number = {2013-001},
type = {Licentiate thesis},
month = apr,
day = 19,
abstract = {The main part of this licentiate thesis concerns
parallelization of recursive estimation methods, both
linear and nonlinear. Recursive estimation deals with the
problem of extracting information about parameters or
states of a dynamical system, given noisy measurements of
the system output and plays a central role in many
applications of signal processing, system identification,
and automatic control. Solving the recursive Bayesian
estimation problem is known to be computationally
expensive, which often makes the methods infeasible in
real-time applications and for problems of large dimension.
As the computational power of the hardware is today
increased by adding more processors on a single chip rather
than increasing the clock frequency and shrinking the logic
circuits, parallelization is the most powerful way of
improving the execution time of an algorithm. It has been
found in this thesis that several of the optimal filtering
methods are suitable for parallel implementation, in
certain ranges of problem sizes. It has been concluded from
the experiments that substantial improvements can be
achieved by performing ``tailor''-made parallelization,
compared to straightforward implementations based on
multi-threaded libraries. For many of the suggested
parallelizations, a linear speedup in the number of cores
has been achieved, providing up to 8 times speedup
on a double quad-core computer. As the evolution of the
parallel computer architectures is unfolding rapidly, many
more processors on the same chip will become available. The
developed methods do not, of course, scale infinitely, but
definitely can exploit and harness some of the
computational power of the next generation of parallel
platforms, allowing for optimal state estimation in
real-time applications.}
}
@PhDThesis{ itlic:2012-009,
author = {Andreas Sembrant},
title = {Efficient Techniques for Detecting and Exploiting Runtime
Phases},
school = it,
department = docs,
year = 2012,
number = {2012-009},
type = {Licentiate thesis},
month = dec,
day = 21,
abstract = {Most applications have time-varying runtime phase
behavior. For example, alternating between memory-bound and
compute-bound phases. Nonetheless, the predominant approach
in computer research has been to categorize an application
based on its average runtime behavior. However, this can be
misleading since the application may appear to be neither
memory nor compute bound.
In this thesis we introduce tools and techniques to enable
researchers and software developers to capture the true
time-varying behavior of their applications. To do so, we
1) develop an efficient technique to detect runtime phases,
2) give new insight into applications' runtime phase
behaviors using this technique, and, finally, 3) explore
different ways to exploit runtime phase detection.
The results are ScarPhase, a low-overhead phase detection
library, and three new methods for exploring applications'
phase behaviors with respect to: 1) cache performance as a
function of cache allocation, 2) performance when sharing
cache with co-running applications, and finally, 3)
performance and power as a function of processor frequency.
These techniques enable us to better understand
applications' performance and how to adapt different
settings to runtime changes. Ultimately, this insight allows
us to create new faster and more power efficient
applications and runtime systems that can better handle the
increasing computation demands and power constraints of
tomorrow's problems.}
}
@PhDThesis{ itlic:2012-008,
author = {Palle Raabjerg},
title = {Extending Psi-calculi and their Formal Proofs},
school = it,
department = csd,
year = 2012,
number = {2012-008},
type = {Licentiate thesis},
month = nov,
day = 14,
abstract = {Psi-calculi is a parametric framework for extensions of
the pi-calculus, with arbitrary data structures and logical
assertions for facts about data. This thesis presents
broadcast psi-calculi and higher-order psi-calculi, two
extensions of the psi-calculi framework, allowing
respectively one-to-many communications and the use of
higher-order process descriptions through conditions in the
parameterised logic. Both extensions preserve the purity of
the psi-calculi semantics; the standard congruence and
structural properties of bisimilarity are proved formally
in Isabelle. The work going into the extensions shows that
depending on the specific extension, working out the formal
proofs can be a work-intensive process. We find that some
of this work could be automated, and implementing such
automation may facilitate the development of future
extensions to the psi-calculi framework.}
}
@PhDThesis{ itlic:2012-007,
author = {Martins da Silva, Margarida},
title = {System Identification and Control for General Anesthesia
based on Parsimonious {W}iener Models},
school = it,
department = syscon,
year = 2012,
number = {2012-007},
type = {Licentiate thesis},
month = oct,
day = 17,
abstract = {The effect of anesthetics in the human body is usually
described by Wiener models. The high number of
patient-dependent parameters in the standard models, the
poor excitatory pattern of the input signals (administered
anesthetics) and the small amount of available input-output
data make application of system identification strategies
difficult.
The idea behind this thesis is that, by reducing the number
of parameters to describe the system, improved results may
be achieved when system identification algorithms and
control strategies based on those models are designed. The
choice of the appropriate number of parameters matches the
parsimony principle of system identification.
The three first papers in this thesis present Wiener models
with a reduced number of parameters for the neuromuscular
blockade and the depth of anesthesia. Batch and recursive
system identification algorithms are presented. Taking
advantage of the small number of continuous time model
parameters, adaptive controllers are proposed in the two
last papers. The controller structure combines an inversion
of the static nonlinearity of the Wiener model with a
linear controller for the exactly linearized system, using
the parameter estimates obtained recursively by an extended
Kalman filter. The performance of the adaptive nonlinear
controllers is tested in a database of realistic patients
with good results. }
}
@PhDThesis{ itlic:2012-006,
author = {Martin Tillenius},
title = {Leveraging Multicore Processors for Scientific Computing},
school = it,
department = tdb,
year = 2012,
number = {2012-006},
type = {Licentiate thesis},
month = sep,
day = 28,
abstract = {This thesis deals with how to develop scientific computing
software that runs efficiently on multicore processors. The
goal is to find building blocks and programming models that
increase the productivity and reduce the probability of
programming errors when developing parallel software.
In our search for new building blocks, we evaluate the use
of hardware transactional memory for constructing atomic
floating point operations. Using benchmark applications
from scientific computing, we show in which situations this
achieves better performance than other approaches.
Driven by the needs of scientific computing applications,
we develop a programming model and implement it as a
reusable library. The library provides a run-time system
for executing tasks on multicore architectures, with
efficient and user-friendly management of dependencies. Our
results from scientific computing benchmarks show excellent
scaling up to at least 64 cores. We also investigate how
the execution time depends on the task granularity, and
build a model for the performance of the task library.}
}
@PhDThesis{ itlic:2012-005,
author = {Egi Hidayat},
title = {On Identification of Endocrine Systems},
school = it,
department = syscon,
year = 2012,
number = {2012-005},
type = {Licentiate thesis},
month = jun,
day = 1,
abstract = {System identification finds nowadays application in
various areas of biomedical research as a tool of empiric
mathematical modeling and model individualization. Hormone
regulation is a classical example of biological feedback
where control theories, in general, and system
identification, in particular, are indispensable in
unraveling the regulation mechanisms and explicating the
complex dynamical phenomena arising in endocrine systems.
The main function of endocrine feedback regulation is to
maintain the hormone levels within a particular
physiological range as well as to sustain an appropriate
hormone secretion pattern. Therefore, a natural operating
mode of a closed-loop endocrine system is a stable periodic
cycle. This property significantly reduces the repertoire
of readily available identification techniques, not least
due to the fact that the regulation (input) signal is
immeasurable in many practical cases.
There are two approaches to blind identification of hormone
dynamics presented in this thesis. The first one is based
on constrained nonlinear least-squares method. Weighting
functions play an important role in satisfying the
biological conditions on the identified model. The second
approach is derived from a novel time-delay system
identification method in Laguerre domain. In the latter,
the time delay appears due to a specific input signal model
and is estimated along with the finite-dimensional dynamics
of hormone kinetics. }
}
@PhDThesis{ itlic:2012-004,
author = {Soma Tayamon},
title = {Nonlinear System Identification with Applications to
Selective Catalytic Reduction Systems},
school = it,
department = syscon,
year = 2012,
number = {2012-004},
type = {Licentiate thesis},
month = jun,
day = 7,
abstract = {The stringent regulations on the emissions levels of heavy
duty vehicles create a demand for new methods of reducing
harmful emissions from the engine. In order to be able to
follow these increasingly stricter legislations, complex
aftertreatment systems are used. Achievement of optimal
performance of these systems requires accurate models that
can be used for control design. As a result, the interest
in modelling and control of aftertreatment systems has
increased.
This thesis deals with the modelling of the nitrogen oxide
(NO$_x$) emissions from heavy duty vehicles using the
selective catalyst as an aftertreatment system for its
reduction. The process of the selective catalytic reduction
(SCR) is nonlinear since the chemical reactions involved
are highly dependent on the operating point. The momentary
operating point is defined by the driving profile of the
vehicle which, for example, includes cold and hot engine
starts, highway and urban driving.
The purpose of this thesis is to investigate different
methods for nonlinear system identification of SCR systems
with control in mind. The first two papers contain the
theoretical work of this thesis. The first paper deals with
improvement of an existing recursive prediction error
method (RPEM) where a more accurate discretisation
algorithm was used to improve the accuracy of the estimated
nonlinear model. The second paper deals with analysis of
the convergence properties of the algorithm. For this
analysis several conditions were formulated that link the
global and local convergence properties of the algorithm to
stability properties of an associated differential
equation. Global convergence to a stationary point was
shown. In the third paper, the RPEM is used for
identification of the SCR system and finally in the fourth
paper a Hammerstein-Wiener model for identification of the
SCR system is applied. In both these cases the black-box
models could predict the NO$_x$ behaviour of the SCR system
quite well. The nonlinear models were shown to describe the
SCR system more accurately than linear models.}
}
@PhDThesis{ itlic:2012-003,
author = {Magnus Gustafsson},
title = {Towards an Adaptive Solver for High-Dimensional {PDE}
Problems on Clusters of Multicore Processors},
school = it,
department = tdb,
year = 2012,
number = {2012-003},
type = {Licentiate thesis},
month = mar,
day = 4,
note = {Included papers available at
\url{http://dx.doi.org/10.1007/978-3-642-11795-4_44},
\url{http://dx.doi.org/10.1007/978-3-642-28145-7_36},
\url{http://www.it.uu.se/research/publications/reports/2011-022}
and
\url{http://www.it.uu.se/research/publications/reports/2012-001}.}
,
abstract = {Accurate numerical simulation of time-dependent phenomena
in many spatial dimensions is a challenging computational
task apparent in a vast range of application areas, for
instance quantum dynamics, financial mathematics, systems
biology and plasma physics. Particularly problematic is
that the number of unknowns in the governing equations (the
number of grid points) grows exponentially with the number
of spatial dimensions introduced, often referred to as the
curse of dimensionality. This limits the range of problems
that we can solve, since the computational effort and
requirements on memory storage directly depend on the
number of unknowns for which to solve the equations.
In order to push the limit of tractable problems, we are
developing an implementation framework, HAParaNDA, for
high-dimensional PDE-problems. By using high-order accurate
schemes and adaptive mesh refinement (AMR) in space, we aim
at reducing the number of grid points used in the
discretization, thereby enabling the solution of larger and
higher-dimensional problems. Within the framework, we use
structured grids for spatial discretization and a
block-decomposition of the spatial domain for
parallelization and load balancing. For integration in
time, we use exponential integration, although the
framework allows the flexibility of other integrators to be
implemented as well. Exponential integrators using the
Lanczos or the Arnoldi algorithm have proven a successful and
efficient approach for large problems. Using a truncation
of the Magnus expansion, we can attain high levels of
accuracy in the solution.
As an example application, we have implemented a solver for
the time-dependent Schr{\"o}dinger equation using this
framework. We provide scaling results for small and medium
sized clusters of multicore nodes, and show that the solver
fulfills the expected rate of convergence.}
}
@PhDThesis{ itlic:2012-002,
author = {Fredrik Bjurefors},
title = {Measurements in Opportunistic Networks},
school = it,
department = docs,
year = 2012,
number = {2012-002},
type = {Licentiate thesis},
month = mar,
day = 2,
abstract = {Opportunistic networks are a subset of delay tolerant
networks where the contacts are unscheduled. Such networks
can be formed ad hoc by wireless devices, such as mobile
phones and laptops. In this work we use a data-centric
architecture for opportunistic networks to evaluate data
dissemination overhead, congestion in nodes' buffer, and
the impact of transfer ordering. Dissemination brings an
overhead since data is replicated to be spread in the
network and overhead leads to congestion, i.e., overloaded
buffers.
We develop and implement an emulation testbed to
experimentally evaluate properties of opportunistic
networks. We evaluate the repeatability of experiments in
the emulated testbed that is based on virtual computers. We
show that the timing variations are on the order of
milliseconds.
The testbed was used to investigate overhead in data
dissemination, congestion avoidance, and transfer ordering
in opportunistic networks. We show that the overhead can be
reduced by informing other nodes in the network about what
data a node is carrying. Congestion avoidance was evaluated
in terms of buffer management, since that is the available
tool in an opportunistic network, to handle congestion. It
was shown that replication information of data objects in
the buffer yields the best results. We show that in a
data-centric architecture where each data item is valued
differently, transfer ordering is important to achieve
delivery of the most valued data.}
}
@PhDThesis{ itlic:2012-001,
author = {Gunnika Isaksson-Lutteman},
title = {Future Train Traffic Control -- Development and deployment
of new principles and systems in train traffic control},
school = it,
department = hci,
year = 2012,
number = {2012-001},
type = {Licentiate thesis},
month = apr,
day = 3,
abstract = {The train traffic control system of the future requires
new solutions and strategies in order to better meet
tomorrow's demands and goals. Uppsala University and
Trafikverket have been collaborating for several years in
research regarding train traffic control and how to improve
traffic controllers' work environment. At an early stage in
the collaboration there have been studies and analysis of
important aspects of the traffic controller's tasks,
strategies, decision making, use of information and support
systems. This research resulted in new control paradigms,
from control by exception to control by re-planning. By
using this paradigm we developed and designed prototype
systems and interfaces that better could meet future goals
and contribute to more optimal use of infrastructure
capacity. Based on this research, a new operational traffic
control system called STEG, was developed in an iterative
and user-centred design process. The system was deployed
and tested operatively at a train traffic control centre in
Sweden. The following evaluations focused on what happens
when STEG is introduced in train traffic controllers' work
places. The evaluation of STEG showed satisfied users with
a feeling of active involvement during the design and
deployment processes, and the new control strategies are
functioning. STEG was seen as successful and was thereafter
developed into MULTI-STEG, intended to be used by several
users simultaneously and supporting them to share
information in a new way. MULTI-STEG was deployed and
tested at another train traffic control centre in Sweden.
The following evaluations of MULTI-STEG focused on what
happens when several users are involved and how train
traffic controllers felt about sharing information with each
other that before have been only in their own minds. Some
complications occurred due to mistakes in the deployment
process, but all together the evaluation showed positive
attitudes towards the new system and MULTI-STEG was
received as an efficient system for train traffic control.
The main results are that STEG and MULTI-STEG can be used
as an efficient train traffic control system and the new
system can reduce the unnecessary cognitive load that the
traffic controllers suffer from with today's system. Also
the deployment process is crucial whether the users are
going to accept a new system or not. STEG was developed in
a user-centred design process, but it is important that the
deployment process is user-centred as well.}
}
@PhDThesis{ itlic:2011-006,
author = {Anette L{\"o}fstr{\"o}m},
title = {Intranet Use as a Leadership Strategy},
school = it,
department = hci,
year = 2011,
number = {2011-006},
type = {Licentiate thesis},
month = dec,
day = 9,
abstract = {This thesis presents results from an investigation of a
virtual leadership strategy, which is utilised in the city
of Stockholm. The Intranet is used as a strategic tool to
implement a steering document in a similar way among all
employees in the organisation. In this frame, features like
sensemakings of the distributed information, influences of
experienced lifeworlds, circumstances at local workplaces,
cultural aspects and technological issues are explored.
The investigation is fully qualitative. Interviews have
been processed, recorded and transcribed. A survey with
open and unstructured questions has broadened empirical
results.
The aim is to explore opinions towards and driving forces
behind taking part in an Intranet based leader strategy and
to investigate what circumstances at local workplaces
affect the potential success of such a leader strategy.
A new interpretation of Geert Hofstede's work is suggested
in future works. This is framed in a cultural model.}
}
@PhDThesis{ itlic:2011-005,
author = {Elena Sundkvist},
title = {A High-Order Accurate, Collocated Boundary Element Method
for Wave Propagation in Layered Media},
school = it,
department = tdb,
year = 2011,
number = {2011-005},
type = {Licentiate thesis},
month = sep,
day = 9,
abstract = {The ultimate goal of this research is to construct a
hybrid model for sound propagation in layered underwater
environments with curved boundaries by employing a
differential formulation for inhomogeneous layers and a
boundary integral formulation for homogeneous layers. The
discretization of the new hybrid model is a combination of
a finite difference method for the Helmholtz equation for
inhomogeneous media and a collocated boundary element
method (BEM) for the integral equation for homogeneous
media, while taking special care of the open boundaries and
the common interface.
Our focus is on sound wave propagation in layered piecewise
\emph{homogeneous} fluid media. A boundary integral
formulation of the Helmholtz equation governing the
acoustic pressure is employed. A fourth-order accurate,
collocated BEM is constructed for this equation in a
systematic way, facilitating its generalization to even
higher orders of accuracy.
A novel approach (for boundary element techniques) is
proposed for modelling the open vertical boundaries. We
introduce artificial near- and far-field boundaries and
apply nonlocal radiation boundary conditions at these.
The collocated BEM is implemented in \textsc{Matlab} and
the numerical experiments show the expected convergence
rate.
A strong benefit of the collocated BEM is that only the
boundary is discretized, thus, reducing the number of
dimensions by one. By a comparison with a fourth-order
finite difference method (FD) it is illustrated that both
methods have memory requirements of the same order,
however, the number of unknowns in the collocated BEM is an
order of magnitude less than in FD and the ratio grows with
the problem size. }
}
@PhDThesis{ itlic:2011-004,
author = {Niclas Finne},
title = {Towards Adaptive Sensor Networks},
school = it,
department = docs,
year = 2011,
number = {2011-004},
type = {Licentiate thesis},
month = may,
day = 30,
abstract = {Wireless sensor networks consist of many small embedded
devices that are equipped with sensors and a wireless
communication unit. These devices, or sensor nodes, are
typically low cost, resource constrained and
battery-powered. Sensor network applications include
environmental monitoring, industrial condition monitoring,
building surveillance, and intelligent homes.
Sensor network applications today are often developed
either using standard software components which enables
simpler development but leads to far from optimal
performance, or software customized for the specific
application which complicates development, software
updates, and software reuse.
We suggest that logic is separated from configuration and
other information, for instance, network statistics and
estimated power consumption. Software components publish
their configuration and other information using a
generalized programming abstraction. Configuration policies
are separate modules that react on changes and reconfigure
the sensor node as needed. These configuration policies are
responsible for coordinating the configuration between the
components and optimize the sensor network towards the
application objectives.
One of our contributions is that we demonstrate the need
for self-monitoring and self-configuration based on
experiences from two deployed sensor networks. Our main
contribution is that we devise a configuration architecture
that solves the problem of cross-layer optimization for
sensor network applications without tight coupling between
components, thus enabling standard and easily replaceable
components to be used. The configuration architecture makes
it possible to reach the same level of performance as
specialized cross-layer optimizations but without adding
application-specific knowledge to the components.}
}
@PhDThesis{ itlic:2011-003,
author = {Rebecka Janols},
title = {Tailor the System or Tailor the User? How to Make Better
Use of Electronic Patient Record Systems},
school = it,
department = hci,
year = 2011,
number = {2011-003},
type = {Licentiate thesis},
month = may,
day = 6,
abstract = {Health care organisations are extremely complex because
they consist of heterogeneous groups of people (clinical
professions, patients, managers), use advanced technology
(medical devices and patient record systems), and apply
many organisational and clinical routines. When introducing
Electronic Patient Record systems (EPR) in health care
organisations, all these aspects get affected. Using a
sociotechnical perspective is necessary in order to get a
``successful'' EPR usage.
The aim of my PhD studies is to provide health care
organisations with knowledge and insights into how they can
improve their organisation and practice in relation to
usage of EPR systems. In my research I have used a grounded
theory methodology for studying, analysing and reflecting
on how electronic patient record systems are used by
professionals in their practice. Studies have been
conducted during a 2.5 years collaborative research
project. Within the studied health care organisation there
are differing opinions if an EPR system is mainly a
technical system or a tool to support the clinical
organisation. This conceptual division leads to an
uncertainty in who is responsible for the proper function
of the EPR system and has a major effect on the
clinicians in their clinical practice. During the research
seven potential problem areas, \emph{mandate, usability,
education, participation, improvements, support} and
\emph{evaluation} have been identified as crucial for the
health care organisation to manage to achieve an effective
EPR usage.
The main results are 1) The health care organisation needs
to establish a problem-solving strategy that questions the
reasons behind the problems occurred, 2) The different
stakeholder groups need to interact, create a better
understanding of each other's perspective and agree on the
same goal for the EPR system, 3) The clinical organisation
needs help to improve their clinical practice in relation
to the EPR system, 4) The EPR deployment and usage affect
the clinicians in different ways. Their attitude towards
the EPR system is dependent on the usability of the EPR
system, the deployment process, their experience of
participation, education, support and possibilities to
improve the system.}
}
@PhDThesis{ itlic:2011-002,
author = {Xin He},
title = {Robust Preconditioning Methods for Algebraic Problems,
Arising in Multi-Phase Flow Models},
school = it,
department = tdb,
year = 2011,
number = {2011-002},
type = {Licentiate thesis},
month = apr,
day = 18,
pages = 52,
copies-printed= 80,
abstract = {The aim of the project is to construct, analyse and
implement fast and reliable numerical solution methods to
simulate multi-phase flow, modeled by a coupled system
consisting of the time-dependent Cahn-Hilliard and
incompressible Navier-Stokes equations with variable
viscosity and variable density. This thesis mainly
discusses the efficient solution methods for the latter
equations aiming at constructing preconditioners, which are
numerically and computationally efficient, and robust with
respect to various problem, discretization and method
parameters.
In this work we start by considering the stationary
Navier-Stokes problem with constant viscosity. The system
matrix arising from the finite element discretization of
the linearized Navier-Stokes problem is nonsymmetric of
saddle point form, and solving systems with it is the inner
kernel of the simulations of numerous physical processes,
modeled by the Navier-Stokes equations. Aiming at reducing
the simulation time, in this thesis we consider iterative
solution methods with efficient preconditioners. When
discretized with the finite element method, both the
Cahn-Hilliard equations and the stationary Navier-Stokes
equations with constant viscosity give rise to linear
algebraic systems with nonsymmetric matrices of two-by-two
block form. In Paper I we study both problems and apply a
common general framework to construct a preconditioner,
based on the matrix structure. As a part of the general
framework, we use the so-called element-by-element Schur
complement approximation. The implementation of this
approximation is rather cheap. However, the numerical
experiments, provided in the paper, show that the
preconditioner is not fully robust with respect to the
problem and discretization parameters, in this case the
viscosity and the mesh size. On the other hand, for not
very convection-dominated flows, i.e., when the viscosity
is not very small, this approximation does not depend on
the mesh size and works efficiently. Considering the
stationary Navier-Stokes equations with constant viscosity,
aiming at finding a preconditioner which is fully robust to
the problem and discretization parameters, in Paper II we
turn to the so-called augmented Lagrangian (AL) approach,
where the linear system is transformed into an equivalent
one and then the transformed system is iteratively solved
with the AL type preconditioner. The analysis in Paper II
focuses on two issues, (1) the influence of a scalar method
parameter (a stabilization constant in the AL method) on
the convergence rate of the preconditioned method and (2)
the choice of a matrix parameter for the AL method, which
involves an approximation of the inverse of the finite
element mass matrix. In Paper III we consider the
stationary Navier-Stokes problem with variable viscosity.
We show that the known efficient preconditioning techniques
in particular, those for the AL method, derived for
constant viscosity, can be straightforwardly applicable
also in this case.
One often used technique to solve the incompressible
Navier-Stokes problem with variable density is via operator
splitting, i.e., decoupling of the solutions for density,
velocity and pressure. The operator splitting technique
introduces an additional error, namely the splitting error,
which should be also considered, together with
discretization errors in space and time. Ensuring the
accuracy of the splitting scheme usually induces additional
constraints on the size of the time-step. Aiming at fast
numerical simulations and using large time-steps may
require to use higher order time-discretization methods.
The latter issue and its impact on the preconditioned
iterative solution methods for the arising linear systems
are envisioned as possible directions for future research.
When modeling multi-phase flows, the Navier-Stokes
equations should be considered in their full complexity,
namely, the time-dependence, variable viscosity and
variable density formulation. Up to the knowledge of the
author, there are not many studies considering all aspects
simultaneously. Issues on this topic, in particular on the
construction of efficient preconditioners of the arising
matrices need to be further studied.}
}
@PhDThesis{ itlic:2011-001,
author = {David Ekl{\"o}v},
title = {Efficient Methods for Application Performance Analysis},
school = it,
department = docs,
year = 2011,
number = {2011-001},
type = {Licentiate thesis},
month = feb,
day = 18,
abstract = {To reduce latency and increase bandwidth to memory, modern
microprocessors are designed with deep memory hierarchies
including several levels of caches. For such
microprocessors, the service time for fetching data from
off-chip memory is about two orders of magnitude longer
than fetching data from the level-one cache. Consequently,
the performance of applications is largely determined by
how well they utilize the caches in the memory hierarchy,
captured by their miss ratio curves. However, efficiently
obtaining an application's miss ratio curve and
interpreting its performance implications is hard. This
task becomes even more challenging when analyzing
application performance on multicore processors where
several applications/threads share caches and memory
bandwidths. To accomplish this, we need powerful techniques
that capture applications' cache utilization and provide
intuitive performance metrics.
In this thesis we present three techniques for analyzing
application performance, StatStack, StatCC and Cache
Pirating. Our main focus is on providing memory hierarchy
related performance metrics such as miss ratio, fetch ratio
and bandwidth demand, but also execution rate. These
techniques are based on profiling information, requiring
both runtime data collection and post processing. For such
techniques to be broadly applicable the data collection has
to have minimal impact on the profiled application, allow
profiling of unmodified binaries, and not depend on custom
hardware and/or operating system extensions. Furthermore,
the information provided has to be accurate and easy to
interpret by programmers, the runtime environment and
compilers.
StatStack estimates an application's miss ratio curve,
StatCC estimates the miss ratio of co-running application
sharing the last-level cache and Cache Pirating measures
any desired performance metric available through hardware
performance counters as a function of cache size. We have
experimentally shown that our methods are both efficient
and accurate. The runtime information required by StatStack
and StatCC can be collected with an average runtime
overhead of 40\%. The Cache Pirating method measures the
desired performance metrics with an average runtime
overhead of 5\%. }
}
@PhDThesis{ itlic:2010-005,
author = {Mikael Laaksoharju},
title = {Let Us Be Philosophers! Computerized Support for Ethical
Decision Making},
school = it,
department = hci,
year = 2010,
number = {2010-005},
type = {Licentiate thesis},
month = sep,
day = 24,
abstract = {This thesis presents a computerized tool for ethical
decision making. For someone who is unfamiliar with the
psychological theory that the tool is based on, it will
perhaps first appear as a pointless piece of software. It
does not give any guidance to what an ethically correct
decision is, it does not suggest relevant ethical
principles or guidelines and it does not even make
reference to known cases of good moral conduct. In fact, it
does not make any moral claims at all. The only two things
that the tool does are that it stimulates reflective,
analytical and holistic reasoning and blocks automatic,
biased and constrained impulses. This approach is chosen to
improve the decision maker's ability to consider the
relevant circumstances in a situation. By focusing on
relevant interests of stakeholders, the scope of
consideration in a moral situation can be expanded and the
impact of decisions can be evaluated with respect to these.
To justify this non-normative approach, the functionality
of normative ethics is analyzed. The conclusion stresses
the importance of self-conscious deliberation. Further
arguments for advocating a systematic, holistic and
self-critical handling of moral problems are collected from
both philosophy and psychology. The structure and
functionality of the tool is founded in psychological
theory and especially the problem of cognitive biases in
moral decision making is addressed. The tool has been
evaluated in two studies, which both indicate that it
actually delivers what it was designed to do. Statistically
significant results show that the tool helped users to
expand the scope of consideration in a moral problem
situation compared to using an equivalent
paper-and-pen-based method.}
}
@PhDThesis{ itlic:2010-004,
author = {Kenneth Duru},
title = {Perfectly Matched Layers for Second Order Wave Equations},
school = it,
department = tdb,
year = 2010,
number = {2010-004},
type = {Licentiate thesis},
month = may,
day = 7,
pages = 102,
copies-printed= 80,
abstract = {Numerical simulation of propagating waves in unbounded
spatial domains is a challenge common to many branches of
engineering and applied mathematics. Perfectly matched
layers (PML) are a novel technique for simulating the
absorption of waves in open domains. The equations modeling
the dynamics of phenomena of interest are usually posed as
differential equations (or integral equations) which must
be solved at every time instant. In many application areas
like general relativity, seismology and acoustics, the
underlying equations are systems of second order hyperbolic
partial differential equations. In numerical treatment of
such problems, the equations are often rewritten as first
order systems and are solved in this form. For this reason,
many existing PML models have been developed for first
order systems. In several studies, it has been reported
that there are drawbacks with rewriting second order
systems into first order systems before numerical solutions
are obtained. While the theory and numerical methods for
first order systems are well developed, numerical
techniques to solve second order hyperbolic systems are an
area of on-going research.
In the first part of this thesis, we construct PML
equations for systems of second order hyperbolic partial
differential equations in two space dimensions, focusing on
the equations of linear elasto-dynamics. One advantage of
this approach is that we can choose auxiliary variables
such that the PML is strongly hyperbolic, thus strongly
well-posed. The second is that it requires fewer auxiliary
variables as compared to existing first order formulations.
However, in continuum the stability of both first order and
second order formulations are linearly equivalent. A
turning point is in numerical approximations. We have found
that if the so-called geometric stability condition is
violated, approximating the first order PML with standard
central differences leads to a high frequency instability
for any given resolution. The second order discretization
behaves much more stably. In the second order setting
instability occurs only if unstable modes are well resolved.
The second part of this thesis discusses the construction
of PML equations for the time-dependent Schr{\"o}dinger
equation. From a mathematical perspective, the Schr{\"o}dinger
equation is unique, in the sense that it is only first
order in time but second order in space. However, with
slight modifications, we carry over our ideas from the
hyperbolic systems to the Schr{\"o}dinger equations and
derive a set of asymptotically stable PML equations. The
new model can be viewed as a modified complex absorbing
potential (CAP). The PML model can easily be adapted to
existing codes developed for CAP by accurately discretizing
the auxiliary variables and appending them accordingly.
Numerical experiments are presented illustrating the
accuracy and absorption properties of the new PML model.
We are hopeful that the results obtained in this thesis
will find useful applications in time-dependent wave
scattering calculations.}
}
@PhDThesis{ itlic:2010-003,
author = {Salman Zubair Toor},
title = {Managing Applications and Data in Distributed Computing
Infrastructures},
school = it,
department = tdb,
year = 2010,
number = {2010-003},
type = {Licentiate thesis},
month = mar,
day = 31,
abstract = {Over the last few decades, the needs of computational
power and data storage by collaborative, distributed
scientific communities have increased very rapidly.
Distributed computing infrastructures such as computing and
storage grids provide means to connect geographically
distributed resources and helps in addressing the needs of
these communities. Much progress has been made in
developing and operating grids, but several issues still
need further attention. This thesis discusses three
different aspects of managing large-scale scientific
applications in grids:
\begin{itemize}
\item Using large-scale scientific applications is often in
itself a complex task, and to set them up and run
experiments in a distributed environment adds another level
of complexity. It is important to design general purpose
and application specific frameworks that enhance the
overall productivity for the scientists. The thesis presents
further development of a general purpose framework where
existing portal technology is combined with tools for
robust and middleware independent job management. Also, a
pilot implementation of a domain-specific problem solving
environment based on a grid-enabled R solution is presented.
\item Many current and future applications will need
large-scale storage systems. Centralized systems are
eventually not scalable enough to handle huge data volumes
and can also have additional problems with security
and availability. An alternative is a reliable and
efficient distributed storage system. In the thesis the
architecture of a self-healing, grid-aware distributed
storage cloud, Chelonia, is described and performance
results for a pilot implementation are presented.
\item In a distributed computing infrastructure it is very
important to manage and utilize the available resources
efficiently. The thesis presents a review of different
resource brokering techniques and how they are implemented
in different production level middlewares. Also, a modified
resource allocation model for the Advanced Resource
Connector (ARC) middleware is described and performance
experiments are presented.
\end{itemize}}
}
@PhDThesis{ itlic:2010-002,
author = {Carl Nettelblad},
title = {Using {M}arkov Models and a Stochastic {L}ipschitz
Condition for Genetic Analyses},
school = it,
department = tdb,
year = 2010,
number = {2010-002},
type = {Licentiate thesis},
month = mar,
day = 19,
abstract = {A proper understanding of biological processes requires an
understanding of genetics and evolutionary mechanisms. The
vast amounts of genetical information that can routinely be
extracted with modern technology have so far not been
accompanied by an equally extended understanding of the
corresponding processes.
The relationship between a single gene and the resulting
properties, phenotype of an individual is rarely clear.
This thesis addresses several computational challenges
regarding identifying and assessing the effects of
quantitative trait loci (QTL), genomic positions where
variation is affecting a trait. The genetic information
available for each individual is rarely complete, meaning
that the unknown variable of the genotype in the loci
modelled also needs to be addressed. This thesis contains
the presentation of new tools for employing the information
that is available in a way that maximizes the information
used, by using hidden Markov models (HMMs), resulting in a
change in algorithm runtime complexity from exponential to
log-linear, in terms of the number of markers. It also
proposes the introduction of inferred haplotypes to further
increase the power to assess these unknown variables for
pedigrees of related genetically diverse individuals.
Modelling consequences of partial genetic information is
also treated.
Furthermore, genes are not directly affecting traits, but
are rather expressed in the environment of and in
concordance with other genes. Therefore, significant
interactions can be expected within genes, where some
combination of genetic variation gives a pronounced, or
even opposite, effect, compared to when occurring
separately. This thesis addresses how to perform efficient
scans for multiple interacting loci, as well as how to
derive highly accurate empirical significance tests in
these settings. This is done by analyzing the mathematical
properties of the objective function describing the quality
of model fits, and reformulating it through a simple
transformation. Combined with the presented prototype of a
problem-solving environment, these developments can make
multi-dimensional searches for QTL routine, allowing the
pursuit of new biological insight.}
}
@PhDThesis{ itlic:2010-001,
author = {Anna Nissen},
title = {Absorbing Boundary Techniques for the Time-dependent
Schr{\"o}dinger Equation},
school = it,
year = 2010,
number = {2010-001},
type = {Licentiate thesis},
month = feb,
day = 11,
abstract = {Chemical dissociation processes are important in quantum
dynamics. Such processes can be investigated theoretically
and numerically through the time-dependent Schr{\"o}dinger
equation, which gives a quantum mechanical description of
molecular dynamics.
This thesis discusses the numerical simulation of chemical
reactions involving dissociation. In particular, an
accurate boundary treatment in terms of artificial,
absorbing boundaries of the computational domain is
considered. The approach taken here is based on the
perfectly matched layer technique in a finite difference
framework. The errors introduced due to the perfectly
matched layer can be divided into two categories, the
modeling error from the continuous model and numerical
reflections that arise for the discretized problem. We
analyze the different types of errors using plane wave
analysis, and parameters of the perfectly matched layer are
optimized so that the modeling error and the numerical
reflections are of the same order. The level of accuracy is
determined by estimating the order of the spatial error in
the interior domain. Numerical calculations show that this
procedure enables efficient calculations within a given
accuracy. We apply our perfectly matched layer to a
three-state system describing a one-dimensional IBr
molecule subjected to a laser field and to a
two-dimensional model problem treating dissociative
adsorption and associative desorption of a H2 molecule on a
solid surface. Comparisons made to standard absorbing
layers in chemical physics prove our approach to be
efficient, especially when high accuracy is of importance.}
}
@PhDThesis{ itlic:2009-005,
author = {Martin Kronbichler},
title = {Numerical Methods for the {N}avier-{S}tokes Equations
Applied to Turbulent Flow and to Multi-Phase Flow},
school = it,
department = tdb,
year = 2009,
number = {2009-005},
type = {Licentiate thesis},
month = dec,
day = 16,
abstract = {This thesis discusses the numerical approximation of flow
problems, in particular the large eddy simulation of
turbulent flow and the simulation of laminar immiscible
two-phase flow. The computations for both applications are
performed with a coupled solution approach of the
Navier-Stokes equations discretized with the finite element
method.
Firstly, a new implementation strategy for large eddy
simulation of turbulent flow is discussed. The approach is
based on the variational multiscale method, where scale
ranges are separated by variational projection. The method
uses a standard Navier-Stokes model for representing the
coarser of the resolved scales, and adds a subgrid
viscosity model to the smaller of the resolved scales. The
scale separation within the space of resolved scales is
implemented in a purely algebraic way based on a plain
aggregation algebraic multigrid restriction operator. A
Fourier analysis underlines the importance of projective
scale separations and that the proposed model does not
affect consistency of the numerical scheme. Numerical
examples show that the method provides better results than
other state-of-the-art methods for computations at low
resolutions.
Secondly, a method for modeling laminar two-phase flow
problems in the vicinity of contact lines is proposed. This
formulation combines the advantages of a level set model
and of a phase field model: Motion of contact lines and
imposition of contact angles are handled like for a phase
field method, but the computational costs are similar to
the ones of a level set implementation. The model is
realized by formulating the Cahn-Hilliard equation as an
extension of a level set model. The phase-field specific
terms are only active in the vicinity of contact lines.
Moreover, various aspects of a conservative level set
method discretized with finite elements regarding the
accuracy of force balance and prediction in jump of
pressure between the inside and outside of a circular
bubble are tested systematically. It is observed that the
errors in velocity are mostly due to inaccuracies in
curvature evaluation, whereas the errors in pressure
prediction mainly come from the finite width of the
interface. The error in both velocity and pressure
decreases with increasing number of mesh points.}
}
@PhDThesis{ itlic:2009-004,
author = {Katharina Kormann},
title = {Numerical Methods for Quantum Molecular Dynamics},
school = it,
department = tdb,
year = 2009,
number = {2009-004},
type = {Licentiate thesis},
month = oct,
day = 9,
abstract = {The time-dependent Schr{\"o}dinger equation models the
quantum nature of molecular processes. Numerical
simulations of these models help in understanding and
predicting the outcome of chemical reactions.
In this thesis, several numerical algorithms for evolving
the Schr{\"o}dinger equation with an explicitly
time-dependent Hamiltonian are studied and their
performance is compared for the example of a pump-probe and
an interference experiment for the rubidium diatom. For the
important application of interaction dynamics between a
molecule and a time-dependent field, an efficient fourth
order Magnus--Lanczos propagator is derived. Error growth
in the equation is analyzed by means of a posteriori error
estimation theory and the self-adjointness of the
Hamiltonian is exploited to yield a low-cost global error
estimate for numerical time evolution. Based on this
theory, an $h,p$-adaptive Magnus--Lanczos propagator is
developed that is capable to control the global error.
Numerical experiments for various model systems (including
a three dimensional model and a dissociative configuration)
show that the error estimate is effective and the number of
time steps needed to meet a certain accuracy is reduced due
to adaptivity.
Moreover, the thesis proposes an efficient numerical
optimization framework for the design of femtosecond laser
pulses with the aim of manipulating chemical reactions.
This task can be formulated as an optimal control problem
with the electric field of the laser being the control
variable. In the algorithm described here, the electric
field is Fourier transformed and it is optimized over the
Fourier coefficients. Then, the frequency band is narrowed
which facilitates the application of a quasi-Newton method.
Furthermore, the restrictions on the frequency band make
sure that the optimized pulse can be realized by the
experimental equipment. A numerical comparison shows that
the new method can outperform the Krotov method, which is a
standard scheme in this field.}
}
@PhDThesis{ itlic:2009-003,
author = {Marta L{\'a}rusd{\'o}ttir},
title = {Listen to Your Users - The Effect of Usability Evaluation
on Software Development Practice},
school = it,
year = 2009,
number = {2009-003},
type = {Licentiate thesis},
month = oct,
day = 2,
abstract = {A vast majority of the people in the western world use
software systems on a daily basis for achieving their goals.
To be able to do that each person needs to communicate what
he or she wants to do to the system and receive a response.
This communication needs to be easy for the user,
especially when the system is new to him or her. Otherwise,
the user either quits using the system; it takes a very
long time or gets very irritated. A software team that is
making new software needs to evaluate the usability of the
system and various methods have been introduced in the
literature to do that.
My research focus in this thesis is on usability
evaluation. I study particularly, how usability evaluation
methods can be compared, what data should be gathered in
usability evaluation to gain knowledge on how the software
affects users who are getting new software for their daily
work and how useful this data is to the recipients.
Two experiments are reported in this thesis where results
from using three different usability evaluation methods are
compared. The main result from these two studies is that
the think-aloud evaluation method should be used, if the
goal of the evaluation is to gather as realistic
information as possible on usability problems that the
users will have when using the system.
Furthermore four case studies are described in the thesis,
in which usability evaluation was done by using the
think-aloud method in co-operation with real users in their
real work situation. These studies give much richer
information on the actual use of the systems involved.
The findings from one of these case studies indicate that
the results from user observation done on a system that
users have not seen before or used only for a few days are
rather similar to the results from usability evaluation
done when users have used the system for a longer period.
So the common practice of doing user observation on a
software system that the participants have not seen before
and then interpreting that the results will be the same for
actual usage of the system when users will use the system
for their real tasks for shorter or longer period is
adequate.}
}
@PhDThesis{ itlic:2009-002,
author = {Elina Eriksson},
title = {Making Sense of Usability - Organizational Change and
Sensemaking when Introducing User-Centred Systems Design in
Public Authorities},
school = it,
department = hci,
year = 2009,
number = {2009-002},
type = {Licentiate thesis},
month = oct,
day = 2,
abstract = {Computers have become an everyday encounter, not least
in work settings. These computers must support the user in
order for her to work in an effective and efficient manner.
The field of Human-Computer Interaction (HCI) has among
other things been focusing on this issue, and there are
numerous methods and activities that aim at helping
developers to develop usable computer systems. However, the
methods and activities must be used in practice in order to
be beneficial, not only within research, thus the methods
must make sense to the system developers, as well as the
organization in which they shall be applied. Furthermore,
the organization must change in order to incorporate these
methods and activities, and this change must impact a
larger part of the organization than just the IT-department.
My research has revolved around the introduction of
usability methods in public authorities, in particular
user-centred systems design (UCSD). My methodology has been
action research, which implies a close collaboration with
practitioners. Some of the methods used to gather data have
been interviews, participatory observations, research
diaries and field studies.
In this licentiate thesis I present my work up to date and
the theories that have informed my understanding of
organizations and organizational change. Furthermore I have
been influenced by the sensemaking theory, which can be
used in order to understand how people make sense of
technology, methods and organizational change. With the
help of these theories, I extend my results further than
presented in the papers.
The notion of organizational change when introducing
usability issues has not achieved sufficient attention in
the HCI-field. This thesis is a step towards an
understanding of this issue. Furthermore, I have, with the
results from my papers together with the theories presented
shown that although formal documents can be used to promote
change, it is not enough. Rather there is a need to further
explore the interplay between formal aspects and the
situated work, and how to enhance sensegiving in this
sensemaking process.}
}
@PhDThesis{ itlic:2009-001,
author = {Joakim Eriksson},
title = {Detailed Simulation of Heterogeneous Wireless Sensor
Networks},
school = it,
department = docs,
year = 2009,
number = {2009-001},
type = {Licentiate thesis},
month = may,
day = 14,
abstract = {Wireless sensor networks consist of many small nodes. Each
node has a small microprocessor, a radio chip, some
sensors, and is usually battery powered which limits
network lifetime. Applications of wireless sensor networks
range from environmental monitoring and health-care to
industrial automation and military surveillance.
Since the nodes are battery powered and communication
consumes more energy than computation, much of the research focuses
on power efficient communication. One of the problems is
however to measure the power consumption and communication
quality of the different solutions.
Simulation of sensor networks can greatly increase
development speed and also be used for evaluating power
consumption as well as communication quality. Simulation
experiments typically give easier access to fine grained
results than corresponding real-world experiments. The
problem with simulators is that it is hard to show that a
simulation experiment corresponds well with a similar
real-world experiment.
This thesis studies how detailed simulation of wireless
sensor networks can be validated for accuracy and also
shows several important uses of detailed simulation such as
power consumption profiling and interoperability testing.
Both of them represent important topics in today's wireless
sensor network research and development.
The results of the thesis are the simulation platform
COOJA/MSPSim and that we show that MAC-protocol experiments
performed in our simulator COOJA/MSPSim correspond well
with experiments performed in our testbed. We also show
that using COOJA/MSPSim any software running in the
simulation can be power profiled.}
}
@PhDThesis{ itlic:2008-003,
author = {Andreas Hellander},
title = {Numerical Simulation of Well Stirred Biochemical Reaction
Networks Governed by the Master Equation},
school = it,
department = tdb,
year = 2008,
number = {2008-003},
type = {Licentiate thesis},
month = oct,
day = 15,
pages = 34,
note = {Included papers available at
\url{http://dx.doi.org/10.1016/j.jcp.2007.07.020},
\url{http://dx.doi.org/10.1007/s10543-008-0174-z}, and
\url{http://dx.doi.org/10.1063/1.2897976}},
abstract = {Numerical simulation of stochastic biochemical reaction
networks has received much attention in the growing field
of computational systems biology. Systems are frequently
modeled as a continuous-time discrete space Markov chain,
and the governing equation for the probability density of
the system is the (chemical) master equation. The direct
numerical solution of this equation suffers from an
exponential growth in computational time and memory with
the number of reacting species in the model. As a
consequence, Monte Carlo simulation methods play an
important role in the study of stochastic chemical
networks. The stochastic simulation algorithm (SSA) due to
Gillespie has been available for more than three decades,
but due to the multi-scale property of the chemical systems
and the slow convergence of Monte Carlo methods, much work
is currently being done in order to devise more efficient
approximate schemes.
In this thesis we review recent work for the solution of
the chemical master equation by direct methods, by exact
Monte Carlo methods and by approximate and hybrid methods.
We also describe two conceptually different numerical
methods to reduce the computational time when studying
models using the SSA. A hybrid method is proposed, which is
based on the separation of species into two subsets based
on the variance of the copy numbers. This method yields a
significant speed-up when the system permits such a
splitting of the state space. A different approach is taken
in an algorithm that makes use of low-discrepancy sequences
and the method of uniformization to reduce variance in the
computed density function.}
}
@PhDThesis{ itlic:2008-002,
author = {Ioana Rodhe},
title = {Query Authentication and Data Confidentiality in Wireless
Sensor Networks},
school = it,
department = docs,
year = 2008,
number = {2008-002},
type = {Licentiate thesis},
month = jun,
day = 11,
abstract = {In this thesis we consider different aspects of security
in sensor networks, in particular query authentication and
confidential data aggregation. Authenticating the queries
is important so attackers cannot modify existing queries
because this would lead to wrong readings; or insert new
queries into the network because this would lead to waste
of energy. When answering queries, in-network
aggregation in sensor networks is an efficient way to save
energy. Nevertheless, node capture in hostile environments
requires protocols for data aggregation where the
intermediate nodes contribute with their own values to the
aggregated data without getting access to it.
Our contributions are two protocols for query
authentication and confidential data aggregation together
with a common layered key distribution scheme. Both static
and mobile base stations are supported. The proposed
protocols use symmetric cryptography, which is preferred in
sensor networks because of the sensor's limited
computational power, energy supply and memory storage. The
results from our simulations show that, if an attacker
captures a small number of nodes, the attacker can only
introduce unauthorized queries into a limited part of the
network and can only get access to a small part of the data
that is aggregated into the network.}
}
@PhDThesis{ itlic:2008-001,
author = {Mattias Wiggberg},
title = {Unwinding Processes in Computer Science Student Projects},
school = it,
department = docs,
year = 2008,
number = {2008-001},
type = {Licentiate thesis},
month = mar,
day = 19,
abstract = {This thesis investigates computer science student projects
and some of the processes involved in the running of such
projects. The reason for this investigation is that there
are some interesting claims concerning the use of projects
as learning approach. For example, they are supposed to
give an extra challenge to the students and prepare them
for working life, by adding known development methods from
industry the sense of reality is emphasized, and involving
industry partners as mock clients also increases the
feeling of reality, but it is still unclear if these features
contribute to the students' learning and what can be done
to increase the potential for learning. There are thus
interesting pedagogical challenges with computer science
student projects. There is a need to better understand the
effects on learning outcomes as a function of how a student
project is designed. The focus in this thesis is on the
effects of role taking in the project groups, work
allocation, and goal setting in student projects.
In this thesis, three studies investigating different
aspects of processes in computer science student projects
are presented. A number of conclusions are drawn, which
serve as a starting point for further research.
The first study investigates how power is distributed
within a group of students in a full semester computer
science project course. Perceived competence of fellow
students contributes to personal influence in the student
project groups, and three qualitatively different ways of
experiencing competence among other students have been
identified.
The second study investigates experiences of the process of
decision-making in a full semester computer science project
course. Six categories describing the experience of
decision-making have been identified spanning from the
experience of decision-making in individual decisions too
small and unimportant to handle by anyone else than the
individual to the experience of decision-making as a
democratic process involving both the full group and the
context in which the group acts.
The third study investigates Swedish engineering students'
conceptions of engineering, where dealing with problems and
their solutions and creativity are identified as core
concepts. Subject concepts, as math, and physics do not
appear in any top position. ``Math'', for example, accounts
for only five percent of the total mentioned engineering
terms. ``Physics'', the second highest ranked subject term,
only accounts for circa 1 percent.
By combining the results from the three studies, four
central areas of general interest for designing and running
student projects have been identified. These four features
are: 1) the mechanism for work allocation; 2) students'
connection to external stakeholders; 3) focus on result or
process; and 4) level of freedom in the project task. These
four features are related to the results from the three
studies in this thesis. The thesis is concluded by
proposing an analytical framework based on those four
features. The intention with the framework is to provide a
useful tool for the analysis and development of future
computer science student projects. }
}
@PhDThesis{ itlic:2007-006,
author = {Bj{\"o}rn Halvarsson},
title = {Interaction Analysis and Control of Bioreactors for
Nitrogen Removal},
school = it,
department = syscon,
year = 2007,
number = {2007-006},
type = {Licentiate thesis},
month = dec,
abstract = {Efficient control of wastewater treatment processes are of
great importance. The requirements on the treated water
(effluent standards) have to be met at a feasible cost.
This motivates the use of advanced control strategies. In
this thesis the activated sludge process, commonly found in
the biological wastewater treatment step for nitrogen
removal, was considered. Multivariable interactions present
in this process were analysed. Furthermore, control
strategies were suggested and tested in simulation studies.
The relative gain array (RGA), Gramian-based interaction
measures and an interaction measure based on the
$\mathcal{H}_2$ norm were considered and compared.
Properties of the $\mathcal{H}_2$ norm based measure were
derived. It was found that the Gramian-based measures, and
particularly the $\mathcal{H}_2$ norm based measure, in
most of the considered cases were able to properly indicate
the interactions. The information was used in the design of
multivariable controllers. These were found to be less
sensitive to disturbances compared to controllers designed
on the basis of information from the RGA.
The conditions for cost-efficient operation of the
activated sludge process were investigated. Different fee
functions for the effluent discharges were considered. It
was found that the economic difference between operation in
optimal and non-optimal setpoints may be significant even
though the treatment performance was the same. This was
illustrated graphically in operational maps. Strategies for
efficient control were also discussed.
Finally, the importance of proper aeration in the activated
sludge process was illustrated. Strategies for control of a
variable aeration volume were compared. These performed
overall well in terms of treatment efficiency, disturbance
rejection and process economy.}
}
@PhDThesis{ itlic:2007-005,
author = {Mahen Jayawardena},
title = {Parallel Algorithms and Implementations for Genetic
Analysis of Quantitative Traits},
school = it,
department = tdb,
year = 2007,
number = {2007-005},
type = {Licentiate thesis},
month = sep,
day = 28,
note = {Typo corrected Sep 10 2007. Included papers available at
\url{http://www.it.uu.se/research/publications/lic/2007-005/paperA.pdf},
\url{http://www.it.uu.se/research/publications/lic/2007-005/paperB.pdf},
\url{http://www.it.uu.se/research/publications/lic/2007-005/paperC.pdf},
and
\url{http://www.it.uu.se/research/publications/lic/2007-005/paperD.pdf}},
abstract = {Many important traits in plants, animals and humans are
quantitative, and most such traits are generally believed
to be regulated by multiple genetic loci. Standard
computational tools for analysis of quantitative traits use
linear regression models for relating the observed
phenotypes to the genetic composition of individuals in a
population. However, using these tools to simultaneously
search for multiple genetic loci is very computationally
demanding. The main reason for this is the complex nature
of the optimization landscape for the multidimensional
global optimization problems that must be solved. This
thesis describes parallel algorithms and implementation
techniques for such optimization problems. The new
computational tools will eventually enable genetic analysis
exploiting new classes of multidimensional statistical
models, potentially resulting in interesting results in
genetics.
We first describe how the algorithm used for global
optimization in the standard, serial software is
parallelized and implemented on a grid system. Then, we
also describe a parallelized version of the more elaborate
global optimization algorithm DIRECT and show how this can
be deployed on grid systems and other loosely-coupled
architectures. The parallel DIRECT scheme is further
developed to exploit both coarse-grained parallelism in
grid or clusters as well as fine-grained, tightly-coupled
parallelism in multi-core nodes. The results show that
excellent speedup and performance can be achieved on grid
systems and clusters, even when using tightly-coupled
algorithms such as DIRECT. Finally, a pilot implementation
of a grid portal providing a graphical front-end for our
code is implemented. After some further development, this
portal can be utilized by geneticists for performing
multidimensional genetic analysis of quantitative traits on
a regular basis.}
}
@PhDThesis{ itlic:2007-004,
author = {Olof Rensfelt},
title = {Tools and Methods for Evaluation of Overlay Networks},
school = it,
department = docs,
year = 2007,
number = {2007-004},
type = {Licentiate thesis},
month = sep,
day = 27,
abstract = {Overlay networks is a popular method to deploy new
functionality which does not currently exist in the
Internet. Such networks often use the peer-to-peer
principle where users are both servers as well as clients
at the same time. We evaluate how overlay networks perform
in a mix of strong and weak peers. The overlay system of
study in this thesis is Bamboo, which is based on a
distributed hash table (DHT).
For the performance evaluation we use both simulations in
NS-2 and emulations in the testbed PlanetLab. One of our
contributions is a NS-2 implementation of the Bamboo DHT.
To simulate nodes joining and leaving, NS-2 is modified to
be aware of the identity of overlay nodes.
To control experiments on PlanetLab we designed Vendetta.
Vendetta is both a tool to visualize network events and a
tool to control the individual peer-to-peer nodes on the
physical machines. PlanetLab does not support bandwidth
limitations, which are needed to emulate weak nodes.
Therefore we designed a lightweight connectivity tool
called Dtour.
Both the NS-2 and PlanetLab experiments indicate that a
system like Bamboo can handle as much as 50 \% weak nodes
and still serve requests. However, the lookup latency and
the number of successful lookups suffer with the increased
network dynamics.}
}
@PhDThesis{ itlic:2007-003,
author = {Thabotharan Kathiravelu},
title = {Towards Content Distribution in Opportunistic Networks},
school = it,
department = docs,
year = 2007,
number = {2007-003},
type = {Licentiate thesis},
month = jun,
day = 8,
note = {Typo corrected 2007-06-01},
abstract = {Opportunistic networking is a new communication paradigm.
Content Distribution in opportunistic networks is
challenging due to intermittent connectivity, short
connection durations and a highly dynamic topology.
Research is needed to develop new applications and
protocols that can distribute content in opportunistic
networks. This thesis explores and evaluates approaches to
developing mobile P2P systems for content distribution,
focusing mainly on the problem of modeling device contacts.
Contact duration and patterns of connections influence
routing and forwarding strategies.
To model device contacts we need to capture the
characteristics of the network and be able to model its
behavior. Connectivity models capture the aggregated
network behavior by modeling the social connectedness of
the network. A model of inter-device relationships can be
constructed using parameters extracted from
connectivity traces collected in the field using real
devices. These traces represent how connectivity might look
in an opportunistic network. Altering and fine tuning these
parameters enables us to change the stochastic behavior of
the network and study the behavior of applications and
protocols. Another approach is mobility modeling. There are
two major drawbacks to using mobility models. First, in ad
hoc networks traces have been collected which estimate the
connectivity of the network. Typically traces are then used
to model node mobility which in turn generates nodal
connectivity during a simulation. This is a wasteful
process and as the network size grows it becomes a tedious
job. Second, the inter-contact time distribution observed
in traces differs greatly from what is generated by popular
mobility models.
We have developed a connectivity model to generate
synthetic device contact traces for opportunistic networks.
We present the preliminary validation results from a
comparative study of synthetic traces and traces collected
in the field.}
}
@PhDThesis{ itlic:2007-002,
author = {Jonas Boustedt},
title = {Students Working with a Large Software System: Experiences
and Understandings},
school = it,
year = 2007,
number = {2007-002},
pages = 162,
copies-printed= 80,
type = {Licentiate thesis},
month = may,
day = 24,
abstract = {This monograph describes an empirical study with the
overall aim of producing insights about how students
experience the subject Computer Science and its learning
environments, particularly programming and software
engineering.
The research takes a start in the students' world, from
their perspective, using their stories, and hence, we have
chosen a phenomenographic approach for our research. By
interpreting the students' descriptions and experiences of
various phenomena and situations, it is possible to gain
knowledge about which different conceptions students can
have and how teaching and the learning environment affect
their understanding. In this study, we focus specifically
on students' conceptions of aspects of object-oriented
programming and their experiences of problem solving
situations in connection with object-oriented system
development.
The questions posed enlighten and focus on the students'
conceptions of both tangible and abstract concepts; the
study investigates how students experienced a task
concerning development in a specific software system, how
they conceived the system itself, and how the students
describe the system's plugin modules. Academic education in
programming deals with abstract concepts, such as
interfaces in the programming language Java. Hence, one of
the questions in this study is how students describe that
specific abstract concept, in a context where they are
conducting a realistic software engineering task.
The results show that there is a distinct variation of
descriptions, spanning from a concrete to-do list, to a
more advanced description where the interface plays a
crucial role in order to produce dynamic and adaptive
systems. The discussion interprets the results and suggests
how we can use them in teaching to provide an extended and
varied understanding, where the educational goal is to
provide for and strengthen the conditions for students to
be able to learn how to develop and understand advanced
software.}
}
@PhDThesis{ itlic:2007-001,
author = {Manivasakan Sabesan},
title = {Querying Mediated Web Services},
school = it,
department = csd,
year = 2007,
number = {2007-001},
type = {Licentiate thesis},
month = feb,
day = 5,
abstract = {Web services provide a framework for data interchange
between applications by incorporating standards such as
XMLSchema, WSDL, SOAP, HTTP, etc. They define operations to
be invoked over a network to perform the actions. These
operations are described publicly in a WSDL document with
the data types of their argument and result. Searching data
accessible via web services is essential in many
applications. However, web services don't provide any
general query language or view capabilities. Current web
services applications to access the data must be developed
using a regular programming language such as Java or C#.
The thesis provides an approach to simplify querying web
services data and proposes efficient processing of database
queries to views of wrapped web services. To show the
effectiveness of the approach, a prototype, web Service
MEDiator system (WSMED), is developed.
WSMED provides general view and query capabilities over
data accessible through web services by automatically
extracting basic meta-data from WSDL descriptions. Based on
imported meta-data, the user can then define views that
extract data from the results of calls to web service
operations. The views can be queried using SQL. A given
view can access many different web service operations in
different ways depending on what view attributes are known.
The views can be specified in terms of several declarative
queries to be applied by the query processor. In addition,
the user can provide semantic enrichments of the meta-data
with key constraints to enable efficient query execution
over the views by automatic query transformations. We
evaluated the effectiveness of our approach over
multi-level views of existing web services and show that
the key constraint enrichments substantially improve query
performance.}
}
@PhDThesis{ itlic:2006-012,
author = {Stefan Blomkvist},
title = {User-Centred Design and Agile Development of {IT}
Systems},
school = it,
department = hci,
year = 2006,
number = {2006-012},
type = {Licentiate thesis},
month = dec,
day = 7,
abstract = {Despite the knowledge on the interaction between humans
and computers, too many IT systems show great deficits when
it comes to usability. Every day we run into technology
that makes our everyday life and our work unnecessarily
complex and difficult because of the IT systems that are
not designed to support our tasks in a usable way. This
thesis deals with different aspects of usability and the
process of how to develop usable IT systems effectively.
Primarily, the systems concerned are used in professional
work, such as case handling systems in large government
organisations.
The main objective of this research is to understand which
essential factors in the system development process that
facilitate the development of usable IT systems. Another
key subject is how Human-computer interaction (HCI)
knowledge can be integrated into systems development, in
particular the integration of user-centred design (UCD) and
agile software development. The research is based on a
qualitative approach and on reflections from my own
experience in development projects. It also includes
exploratory studies and design cases.
The attempts of bridging the gap between HCI and software
engineering have not been notably successful in practice.
To address some of these problems, there is a need for a
more precise definition of user-centred design, which is
proposed in the thesis. Also, the complicated reality of
systems development is not considered enough by HCI
researchers and practitioners. To reach better results, UCD
has to be integrated as a natural part of the development
process. In the thesis, I argue that the agile approach
together with UCD can be a good starting point for this
integration. The agile approach emphasises that responding
to change in development is more important than strictly
adhering to a plan. Also, it prioritises regular deliveries
of working software over extensive models and
documentation. However, from a HCI perspective, agile
processes do not inherently provide the required support
for user-centred design. Nevertheless, the basic values and
specific methods of agile development may have the
potential to work very well together with UCD. For
instance, iterative development is fundamental to both
user-centred design and agile development.
Finally, the research addresses how iterative methods can
be used to find design solutions that support the users to
cope with the problems of overview and control in case
handling work.}
}
@PhDThesis{ itlic:2006-011,
author = {{\AA}sa Cajander},
title = {Values and Perspectives Affecting {IT} Systems Development
and Usability Work},
school = it,
department = hci,
year = 2006,
number = {2006-011},
pages = {126},
copies-printed= {80},
type = {Licentiate thesis},
month = dec,
day = 7,
abstract = {Computer supported work is often stressful and inadequate
computer systems and poor usability contribute to the
problem. Still the work situation, and work environment of
users are seldom considered when developing computer
systems, and it is difficult to incorporate the ideas of
User Centred Systems Design (UCSD) in practice. Hence, this
research addresses the difficulty in integrating usability,
UCSD and occupational health issues in IT systems
development in order to improve the resulting work
situation and well-being of users. How do basic values and
perspectives of stakeholders in systems development
projects affect the work with UCSD, usability and users'
health issues in the organisations studied?
This research aims at influencing systems development in
practice; hence, research is carried out in real life
settings with an action research approach. Data is gathered
and analysed with a qualitative research approach with
interview studies, meetings with stakeholders, analysis of
documentation, observations and field studies. The
theoretical framework adheres to situated action,
participatory design, and UCSD that stresses the importance
of involving users in the design process.
This research shows that several basic values and
perspectives affect systems development and hinder the
usability work, for example, the perspective on user
representatives, the value of rationality and objectivity,
and the perspective underpinning descriptions and discourse
on work. Moreover, this research indicates that the strong
business values of automation, efficiency and customer
satisfaction shape the development of new technology, and
ultimately the tasks and work practices of the civil
servants. In short, the studies show that there are some
contradictions in business values and the implementation of
user-centred systems design, usability and health issues in
systems development.
Attitudes and perspectives are not easily changed, and
change comes gradually. In these organisations, we
continuously discuss the integration of health issues in
systems development, and by introducing and changing the
models of systems development these will hopefully enable
communication and change forwards of new perspectives and
values. However, a focus on models alone is insufficient
and therefore we need to develop a systematic approach to
include reflection and new perspectives. Perhaps the
reflection itself would help us see our values and
perspectives and to alter them?}
}
@PhDThesis{ itlic:2006-010,
author = {Henrik Johansson},
title = {Performance Characterization and Evaluation of Parallel
PDE Solvers},
school = it,
department = tdb,
year = 2006,
number = {2006-010},
type = {Licentiate thesis},
month = nov,
day = {22},
pages = {90},
copies-printed= {80},
note = {Included papers available at
\url{http://www.it.uu.se/research/publications/lic/2006-010/paperA.pdf},
\url{http://www.it.uu.se/research/publications/lic/2006-010/paperB.pdf},
and
\url{http://www.it.uu.se/research/publications/lic/2006-010/paperC.pdf}}
,
abstract = {Computer simulations that solve partial differential
equations (PDEs) are common in many fields of science and
engineering. To decrease the execution time of the
simulations, the PDEs can be solved on parallel computers.
For efficient parallel implementations, the characteristics
of both the hardware and the PDE solver must be taken into
account. In this thesis, we explore two ways to increase
the efficiency of parallel PDE solvers.
First, we use full-system simulation of a parallel computer
to get detailed knowledge about cache memory usage for
three parallel PDE solvers. The results reveal cases of bad
cache memory locality. This insight can be used to improve
the performance of the PDE solvers.
Second, we study the adaptive mesh refinement (AMR)
partitioning problem. Using AMR, computational resources
are dynamically concentrated to areas in need of a high
accuracy. Because of the dynamic resource allocation, the
workload must repeatedly be partitioned and distributed
over the processors. We perform two comprehensive
characterizations of partitioning algorithms for AMR on
structured grids. For an efficient parallel AMR
implementation, the partitioning algorithm must be
dynamically selected at run-time with regard to both the
application and computer state. We prove the viability of
dynamic algorithm selection and present performance data
that show the benefits of using a large number of
complementing partitioning algorithms. Finally, we discuss
how our characterizations can be used in an algorithm
selection framework.}
}
@PhDThesis{ itlic:2006-009,
author = {Eddie Wadbro},
title = {Topology Optimization for Acoustic Wave Propagation
Problems},
school = it,
department = tdb,
year = 2006,
number = {2006-009},
type = {Licentiate thesis},
month = oct,
day = 13,
copies-printed= {80},
pages = {112},
abstract = {The aim of this study is to develop numerical techniques
for the analysis and optimization of acoustic horns for
time harmonic wave propagation. An acoustic horn may be
viewed as an impedance transformer, designed to give an
impedance matching between the feeding waveguide and the
surrounding air. When modifying the shape of the horn, the
quality of this impedance matching changes, as well as the
angular distribution of the radiated wave in the far field
(the directivity). The dimensions of the horns considered
are in the order of the wavelength. In this wavelength
region the wave physics is complicated, and it is hard to
apply elementary physical reasoning to enhance the
performance of the horn. Here, topology optimization is
applied to improve the efficiency and to gain control over
the directivity of the acoustic horn.}
}
@PhDThesis{ itlic:2006-008,
author = {Agnes Rensfelt},
title = {Nonparametric Identification of Viscoelastic Materials},
school = it,
department = syscon,
year = 2006,
number = {2006-008},
type = {Licentiate thesis},
month = oct,
day = 6,
abstract = {Viscoelastic materials can today be found in a wide range
of practical applications. In order to make efficient use
of these materials in construction, it is of importance to
know how they behave when subjected to dynamic load.
Characterization of viscoelastic materials is therefore an
important topic, that has received a lot of attention over
the years.
This thesis treats different nonparametric methods for
identifying the complex modulus of a viscoelastic
material. The complex modulus is a frequency dependent
material function, that describes the deformation of the
material when subjected to uniaxial stress. With knowledge
about this and other material functions, it is possible to
simulate and predict how the material behaves under
different kinds of dynamic loads. The complex modulus is
often identified through wave propagation testing.
An important aspect of identification is the accuracy of
the estimates. For the identification to be as accurate as
possible, it is important that the experimental data
contains as much valuable information as possible.
Different experimental conditions, such as sensor locations
and choice of excitation, can influence the amount of
valuable information in the data. The procedure of
determining optimal values for such design parameters is
known as optimal experiment design.
The first two papers of the thesis treat optimal
experiment design for nonparametric identification of the
complex modulus, based on wave propagation tests on large
homogeneous specimens. Optimal sensor locations are treated
in the first paper, and optimal excitation in the second.
In the third paper, a technique for estimating the complex
modulus for a small pellet-sized specimen is presented.
Three different procedures are considered, and an analysis
of the accuracy of the estimates is carried out.}
}
@PhDThesis{ itlic:2006-007,
author = {Stefan Engblom},
title = {Numerical Methods for the Chemical Master Equation},
school = it,
department = tdb,
year = 2006,
number = {2006-007},
type = {Licentiate thesis},
month = sep,
day = 27,
copies-printed= 80,
pages = 112,
note = {Included papers available at
\url{http://www.it.uu.se/research/publications/lic/2006-007/paperA.pdf},
\url{http://www.it.uu.se/research/publications/lic/2006-007/paperB.pdf},
and
\url{http://www.it.uu.se/research/publications/lic/2006-007/paperC.pdf}}
,
abstract = {The numerical solution of chemical reactions described at
the meso-scale is the topic of this thesis. This
description, the \emph{master equation of chemical
reactions}, is an accurate model of reactions where
stochastic effects are crucial for explaining certain
effects observed in real life. In particular, this general
equation is needed when studying processes inside living
cells where other macro-scale models fail to reproduce the
actual behavior of the system considered.
The main contribution of the thesis is the numerical
investigation of two different methods for obtaining
numerical solutions of the master equation.
The first method produces statistical quantities of the
solution and is a generalization of a frequently used
macro-scale description. It is shown that the method is
efficient while still being able to preserve stochastic
effects.
By contrast, the other method obtains the full solution of
the master equation and gains efficiency by an accurate
representation of the state space.
The thesis contains necessary background material as well
as directions for intended future research. An important
conclusion of the thesis is that, depending on the setup of
the problem, methods of highly different character are
needed.}
}
@PhDThesis{ itlic:2006-006,
author = {Anna Eckerdal},
title = {Novice Students' Learning of Object-Oriented Programming},
school = it,
department = tdb,
year = 2006,
number = {2006-006},
type = {Licentiate thesis},
month = oct,
day = 6,
abstract = {This thesis investigates students' experiences of learning
to program. Learning to program is a complex activity. It
involves elements of learning abstract concepts as well as
both learning and using advanced resources like computers
and compilers. The learning experience is affected by
factors like students' motives to learn and their general
understanding of what learning to program means. These
issues form the basis for the four research themes
addressed in this thesis, specifically: students'
experiences of what learning to program means; how students
understand central concepts in programming; how students
use and experience help from resources; and students'
motives to learn to program.
The thesis presents a qualitative study on novice students'
experiences of learning object-oriented programming. Data
was collected via semi-structured interviews. The
interviews were analysed mainly using a phenomenographic
research approach. The analysis resulted in the formulation
of categories of description of students' qualitatively
different ways to understand what learning to program
means. In addition, categories describing different ways to
understand the concepts \emph{object} and \emph{class} in
object-oriented programming were formulated. From an
educational point of view, these results can be used to
identify aspects of learning to program that are critical
from the students' perspective.
The analysis of students' use of resources revealed that
some resources were mainly used in a search-for-meaning way
that promotes good learning, while another group of
resources were mainly used in a superficial way. The two
groups of resources seem however to interact with each
other when students take responsibility for their own
learning, which in particular characterizes their work with
the larger computer assignments. When working with those,
the students describe that both groups of resources were
important for the learning. The analysis of students'
descriptions of their motives to learn pinpoints motives
that can enhance learning.
In the study there were students who expressed that they
had problems to know how to go about to study computer
programming. This might indicate problems about knowing how
to use available resources efficiently. Students who do not
know how to use resources like the compiler in an efficient
way, will have difficulties to perform assignments, which
is expressed by the students as very important for the
learning of concepts. The results also indicate the
importance for educators to provide a learning environment
with a variety of resources which can connect to students'
different motives to learn, pointed to in the study. In
this way all four aspects of the learning experience
examined in the present study are important for students'
learning of object-oriented programming. }
}
@PhDThesis{ itlic:2006-005,
author = {Arvid Kauppi},
title = {A Human-Computer Interaction Approach to Train Traffic
Control},
school = it,
department = hci,
year = 2006,
number = {2006-005},
pages = 112,
copies-printed= 80,
type = {Licentiate thesis},
month = may,
day = 30,
abstract = {Train traffic in Sweden has increased over the last years
and is today at an all time high. At the same time demands
for improved punctuality and better predictability are
increasing. If it would be possible to improve punctuality
and thereby the possibility to use the infrastructural
resources more optimally by providing improved systems for
controlling train traffic, this could be a very cost
efficient way to improve the train traffic.
This thesis addresses research with a primary goal to
investigate how, from a Human-Computer Interaction
perspective, systems could be designed to better support
the human's capacity and capabilities to control train
traffic in an efficient way. Earlier research on humans in
control of complex systems contributes to this work. One
important part of the research is to gain knowledge on how
train dispatchers perform their work today and which
difficulties that exist. The research has a user centered
approach. In close co-operation with train traffic
professionals we try to discuss and develop solutions for
improving the situation. Since the project started in 1996
proposals of new strategies for control and also solutions
for how such a system can be designed have been developed
and to some extent evaluated.
The Swedish National Rail Administration (Banverket) is now
planning to build an operative control system based on
control strategies and ideas from this research project.}
}
@PhDThesis{ itlic:2006-004,
author = {Mikael Erlandsson},
title = {Usability in Transportation -- Improving the Analysis of
Cognitive Work Tasks},
school = it,
department = hci,
year = 2006,
number = {2006-004},
type = {Licentiate thesis},
month = jun,
day = 02,
note = {ISSN of originally printed version should be 1404-5117},
abstract = {In most vehicle domains within the transportation sector,
traffic is increasing and vehicles are becoming more
technologically advanced. In addition to this, drivers are
faced with conflicting goals, such as punctuality,
maintaining safety, minimizing fuel consumption, ensuring
passenger comfort, etc. When accidents occur, the drivers'
actions and mishaps are often in focus, even though the
work environment, the organization behind the drivers, and
the educational level may provide equally important
explanations for incidents and actions.
In this thesis, factors influencing operators' behaviour
are acknowledged and attempts are made to understand how
these factors affect vehicle operators in their daily work.
Even though modern vehicles are equipped with new
technology that supposedly aids drivers, studies of actual
work typically reveal that these tools are not necessarily
suited for their purpose.
In a larger perspective, it is necessary not only to
improve this technology, but to redesign how vehicle
drivers perform their work. In practice, also traditional
processes for development of technology affect how the
operators work, although then simply a side effect of
technology being introduced. Based on a deep understanding
of the operators' work, the long-term goal here is to
instead design new ways of working that allows the
operators to use their skills to do meaningful driving
tasks supported by technology.
To acquire this understanding of how the operators work, a
new method of information acquisition has been developed
and tested within the rail and marine domains. These
studies resulted in an understanding of what train and
high-speed ferry operators are occupied with during their
journeys, as well as insights into why they perform in
certain manners and how they think and reason about these
tasks.}
}
@PhDThesis{ itlic:2006-003,
author = {Therese Berg},
title = {Regular Inference for Reactive Systems},
school = it,
department = docs,
year = 2006,
number = {2006-003},
type = {Licentiate thesis},
month = apr,
day = 27,
pages = 132,
abstract = {Models of reactive systems play a central role in many
techniques for verification and analysis of reactive
systems. Both a specification of the system and the
abstract behavior of the system can be expressed in a
formal model. Compliance with the functional parts in the
specification can be controlled in different ways. Model
checking techniques can be applied to a model of the system
or directly to source code. In testing, model-based
techniques can generate test suites from specification. A
bottleneck in model-based techniques is however to
construct a model of the system. This work concerns a
technique that automatically constructs a model of a system
without access to specification, code or internal
structure. We assume that responses of the system to
sequences of input can be observed. In this setting, so
called regular inference techniques build a model of the
system based on system responses to selected input
sequences.
There are three main contributions in this thesis. The
first is a survey on the most well-known techniques for
regular inference. The second is an analysis of Angluin's
algorithm for regular inference on synthesized examples. On
a particular type of examples, with prefix-closed
languages, typically used to model reactive systems, the
required number of input sequences grow approximately
quadratically in the number of transitions of the system.
However, using an optimization for systems with
prefix-closed languages we were able to reduce the number
of required input sequences with about 20\%. The third
contribution is a developed regular inference technique for
systems with parameters. This technique aims to better
handle entities of communications protocols where messages
normally have many parameters of which few determine the
subsequent behavior of the system. Experiments with our
implementation of the technique confirm a reduction of the
required number of input sequences, in comparison with
Angluin's algorithm.}
}
@PhDThesis{ itlic:2006-002,
author = {Anders Hessel},
title = {Model-Based Test Case Selection and Generation for
Real-Time Systems},
school = it,
department = docs,
year = 2006,
number = {2006-002},
type = {Licentiate thesis},
month = mar,
day = 7,
copies-printed= 80,
pages = 106,
abstract = {Testing is the dominating verification technique used in
industry today, and many man-hours and resources are
invested in the testing of software products. To cut down
the cost of testing, automated test execution becomes more
and more popular. However, the selection of which tests to
be executed is still mainly a manual process that is error
prone, and often without sufficient guarantees that the
system will be systematically tested. A way to achieve
systematic testing is to ensure that the tests satisfy a
required coverage criterion.
In this thesis two main problems are addressed: the problem
of how to formally specify coverage criteria, and the
problem of how to generate a test suite from a formal
system model, such that the test suite satisfies a given
coverage criterion. We also address the problem of how to
generate an optimal test suite, e.g., with respect to the
total time required to execute the test suite.
Our approach is to convert the test case generation problem
into a reachability problem. We observe that a coverage
criterion consists of a set of items, which we call
coverage items. The problem of generating a test case for
each coverage item can be treated as separate reachability
problems. We use an on-the-fly method, where coverage
information is added to the states of the analyzed system
model, to solve the reachability problem of a coverage
item. The coverage information is used to select a set of
test cases that together satisfy all the coverage items,
and thus the full coverage criterion.
Based on the view of coverage items we define a language,
in the form of parameterized observer automata, to formally
describe coverage criteria. We show that the language is
expressive enough to describe a variety of common coverage
criteria in the literature. Two different ways to generate
test suites from a system model and a given coverage
criterion are presented. The first way is based on model
annotations and uses the model checker Uppaal. The second
way, where no annotations are needed, is a modified
reachability analysis algorithm that is implemented in an
extended version of the Uppaal tool. }
}
@PhDThesis{ itlic:2006-001,
author = {Linda Brus},
title = {Recursive Black-box Identification of Nonlinear
State-space {ODE} Models},
school = it,
department = syscon,
year = 2006,
number = {2006-001},
type = {Licentiate thesis},
month = jan,
abstract = {Nonlinear system identification methods are a topic that
has been gaining interest over the last years. One reason
is the many application areas in controller design and
system development. However, the problem of modeling
nonlinear systems is complex and finding a general method
that can be used for many different applications is
difficult.
This thesis treats recursive identification methods for
identification of systems that can be described by
nonlinear ordinary differential equations. The general
model structure enables application to a wide range of
processes. It is also suitable for usage in combination
with many nonlinear controller design methods.
The first two papers of the thesis illustrate how a
recursive prediction error method (RPEM) can be used for
identification of an anaerobic digestion process and a
solar heating system. In the former case the model
complexity is significantly reduced compared to a
semi-physical model of the system, without losing much in
model performance. In the latter case it is shown that it
is possible to reach convergence even for a small data set,
and that the resulting model is of comparable quality as a
previously published grey-box model of the same system.
The third paper consists of a convergence analysis of the
studied RPEM. The analysis exploits averaging analysis
using an associated ordinary differential equation, and
formulates conditions for convergence to a minimum of the
criterion function. Convergence to a true parameter set is
also illustrated by an example.
The fourth, and last, paper of this thesis addresses the
problem of finding suitable initial parameters
\textit{e.g.} for the RPEM. With a potentially non-convex
criterion function the choice of initial parameters becomes
decisive for whether the algorithm converges to the global
optimum, or a local one. The suggested initialization
algorithm is a Kalman filter based method. Experiments
using a simulated example show that the Kalman based method
can, under beneficial circumstances, be used for
initialization of the RPEM. The result is further supported
by successful identification experiments of a laboratory
scale cascaded tanks process, where the Kalman based method
was used for initialization. }
}
@PhDThesis{ itlic:2005-011,
author = {Bj{\"o}rn Holmberg},
title = {Towards Markerless Analysis of Human Motion},
school = it,
department = syscon,
year = 2005,
number = {2005-011},
type = {Licentiate thesis},
month = dec,
day = 16,
abstract = {The topic for this thesis is the analysis of human
movement, or more specifically, markerless analysis of
human movement from video material. By markerless analysis
is meant that the full image material is used as input in
contrast with the traditional marker systems that only use
the position of marker centers. The basic idea is to use
more of the information in the images to improve the
analysis.
Starting off with the aim of markerless analysis, an
application is designed that uses texture added to the
subject to estimate the position of the knee joint center
in real images. The approach shows the plausibility of
using subject texture for estimation purposes.
Another issue that is addressed is how one can generate
synthetic image data. Using basic tools of graphics
programming a virtual environment used to synthesize data
is created. This environment is also used to evaluate some
different camera solutions.
One method to make three dimensional reconstruction from
multiple images of an object is tested using the synthetic
data. The method is based on a \emph{``brute force''}
approach and does not show good performance in terms of
computing speed. With appropriate representations of the
three dimensional objects, mathematical methods might speed
up the analysis. }
}
@PhDThesis{ itlic:2005-010,
author = {Paul Sj{\"o}berg},
title = {Numerical Solution of the {F}okker-{P}lanck Approximation
of the Chemical Master Equation},
school = it,
department = tdb,
year = 2005,
number = {2005-010},
type = {Licentiate thesis},
month = dec,
day = 15,
copies-printed= {84},
pages = {100},
abstract = {The chemical master equation (CME) describes the
probability for the discrete molecular copy numbers that
define the state of a chemical system. Each molecular
species in the chemical model adds a dimension to the state
space. The CME is a difference-differential equation which
can be solved numerically if the state space is truncated
at an upper limit of the copy number in each dimension. The
size of the truncated CME suffers from an exponential
growth for an increasing number of chemical species.
In this thesis the chemical master equation is approximated
by a continuous Fokker-Planck equation (FPE) which makes it
possible to use sparser computational grids than for CME.
FPE on conservative form is used to compute steady state
solutions by computation of an extremal eigenvalue and the
corresponding eigenvector as well as time-dependent
solutions by an implicit time-stepping scheme.
The performance of the numerical solution is compared to a
standard Monte Carlo algorithm. The computational work for
a solution with the same estimated error is compared for
the two methods. Depending on the problem, FPE or the Monte
Carlo algorithm will be more efficient. FPE is well suited
for problems in low dimensions, especially if high accuracy
is desirable.}
}
@PhDThesis{ itlic:2005-009,
author = {Magnus Evestedt},
title = {Parameter and State Estimation using Audio and Video
Signals},
school = it,
department = syscon,
year = 2005,
number = {2005-009},
pages = {110},
copies-printed= {80},
month = nov,
day = {29},
type = {Licentiate thesis},
abstract = {The complexity of industrial systems and the mathematical
models to describe them increases. In many cases point
sensors are no longer sufficient to provide controllers and
monitoring instruments with the information necessary for
operation. The need for other types of information, such as
audio and video, has grown. Suitable applications range in
a broad spectrum from microelectromechanical systems and
bio-medical engineering to papermaking and steel production.
This thesis is divided into five parts. First a general
introduction to the field of vision-based and sound-based
monitoring and control is given. A description of the
target application in the steel industry is included.
In the second part, a recursive parameter estimation
algorithm that does not diverge under lack of excitation is
studied. The focus is on the stationary properties of the
algorithm and the corresponding Riccati equation.
The third part compares the parameter estimation algorithm
to a number of well-known estimation techniques, such as
the Normalized Least Mean Squares and the Kalman filter.
The benchmark for the comparison is an acoustic echo
cancellation application. When the input is insufficiently
exciting, the studied method performs best of all
considered schemes.
The fourth part of the thesis concerns an experimental
application of vision-based estimation. A water model is
used to simulate the behaviour of the steel bath in a
Linz-Donawitz steel converter. The water model is captured
from the side by a video camera. The images together with a
nonlinear model is used to estimate important process
parameters, describing the heat and mass transport in the
process. The estimation results are compared to those
obtained by previous researchers and the suggested approach
is shown to decrease the estimation error variance by
$50\%$.
The complexity of the parameter estimation procedure by
means of optimization makes the computation time large. In
the final part, the time consumption of the estimation is
decreased by using a smaller number of data points. Three
ways of choosing the sampling points are considered. An
observer-based approach decreases the computation time
significantly, with an acceptable loss of accuracy of the
estimates. }
}
@PhDThesis{ itlic:2005-008,
author = {Niklas Johansson},
title = {Usable IT Systems for Mobile Work},
school = it,
department = hci,
year = 2005,
number = {2005-008},
type = {Licentiate thesis},
month = dec,
day = 9,
pages = 188,
copies-printed= 80,
abstract = {Today, mobile technology is making its entry into working
life within all sorts of occupations. When the purpose of
the technology is to support mobile work, new requirements
appear - for both the technology itself and for the
emerging new work processes - as a result of these new
conditions. Consequently, the introduction of a new IT
system will affect the organisation and the way work is
performed. This thesis addresses these changes in work
processes and ways to provide a supporting IT system. An
underlying component of my research work is the belief
that the personnel from the organisation itself must
participate to a large extent when developing new work
processes and when designing supporting IT systems, since
they will be using the IT system as a tool in their future
work practice. To understand the nature of mobility in a
work context and how it affects usability in IT systems, I
have initiated studies of the area where mobile work is
supported by technology. Important characteristics have
been found that affect mobile work. My research work has
concerned traditional professions, primarily professions
within mobile healthcare. An exhaustive study of how to
design new work processes within the area of home care of
the elderly has been carried out, accompanied by field
studies of mobile work within the mobile healthcare sector.
The results have been described in terms of aspects of
future work processes that are effective and sustainable.
Moreover, important characteristics of mobile technology
that support this kind of mobile work have been identified.
The two perspectives are then merged, in order to design
usable IT systems for mobile work.}
}
@PhDThesis{ itlic:2005-007,
author = {Mei Hong},
title = {On Two Methods for Identifying Dynamic Errors-in-Variables
Systems},
school = it,
department = syscon,
year = 2005,
number = {2005-007},
type = {Licentiate thesis},
month = nov,
day = 16,
pages = {108},
copies-printed= {80},
abstract = {Identification of dynamic errors-in-variables systems,
where both inputs and outputs are affected by errors
(measurement noises), is a fundamental problem of great
interest in many areas, such as process control,
econometrics, astronomical data reduction, image
processing, etc. This field has received
increased attention within several decades. Many solutions
have been proposed with different approaches. In this
thesis, the focus is on some specific problems concerning
two time domain methods for identifying linear dynamic
errors-in-variables systems.
The thesis is divided into four parts. In the first part, a
general introduction to the problem of identifying
errors-in-variables systems and different approaches to
solve the problem are given. Also, a summary of the
contributions and some topics for future works are
presented.
The second part of the thesis considers the instrumental
variables based approaches. They are studied under the
periodic excitation condition. The main motivation is to
analyze what type of instrumental variables should be
chosen to maximally utilize the information of the periodic
measurements. A particular overdetermined instrumental
variable estimator is proposed, which can achieve optimal
performance without weighting.
The asymptotic convergence properties of the
Bias-eliminating least squares (BELS) methods are
investigated in the third part. By deriving an error
dynamics equation for the parameter estimates, it is shown
that the convergence of the bias-eliminating algorithms is
determined by the largest magnitude of the eigenvalues of
the system matrix. To overcome the possible divergence of
the iteration-type bias-eliminating algorithms under very
low signal-to-noise ratio, we re-formulate the
bias-elimination problem as a minimization problem and
develop a variable projection algorithm to perform
consistent parameter estimation.
Part four contains an analysis of the accuracy properties
of the BELS estimates. It is shown that the estimated
system parameters and the estimated noise variances are
asymptotically Gaussian distributed. An explicit expression
for the normalized asymptotic covariance matrix of the
estimated system parameters and the estimated noise
variances is derived. }
}
@PhDThesis{ itlic:2005-006,
author = {Erik B{\"a}ngtsson},
title = {Robust Preconditioned Iterative Solution Methods for
Large-Scale Nonsymmetric Problems},
school = it,
department = tdb,
year = 2005,
number = {2005-006},
type = {Licentiate thesis},
month = nov,
day = 3,
note = {Typos corrected 2005-11-21},
abstract = {We study robust, preconditioned, iterative solution
methods for large-scale linear systems of equations,
arising from different applications in geophysics and
geotechnics.
The first type of linear systems studied here, which are
dense, arise from a boundary element type of discretization
of crack propagation in brittle material. Numerical
experiments show that simple algebraic preconditioning
strategies result in iterative schemes that are highly
competitive with a direct solution method.
The second type of algebraic systems are nonsymmetric and
indefinite and arise from finite element discretization of
the partial differential equations describing the elastic
part of glacial rebound processes. An equal order finite
element discretization is analyzed and an optimal
stabilization parameter is derived.
The indefinite algebraic systems are of 2-by-2-block form,
and therefore block preconditioners of block-factorized or
block-triangular form are used when solving the indefinite
algebraic system. There, the required Schur complement is
approximated in various ways and the quality of these
approximations is compared numerically.
When the block preconditioners are constructed from
incomplete fac\-toriza\-tions of the diagonal blocks, the
iterative scheme show a growth in iteration count with
increasing problem size. This growth is stabilized by
replacing the incomplete factors with an inner iterative
scheme with a (nearly) optimal order multilevel
preconditioner.}
}
@PhDThesis{ itlic:2005-005,
author = {Peter Naucl{\'e}r},
title = {Modeling and Control of Vibration in Mechanical
Structures},
school = it,
department = syscon,
year = 2005,
number = {2005-005},
type = {Licentiate thesis},
month = oct,
day = {26},
abstract = {All mechanical systems exhibit vibrational response when
exposed to external disturbances. In many engineering
applications vibrations are undesirable and may even have
harmful effects. Therefore, control of mechanical vibration
is an important topic and extensive research has been going
on in the field over the years.
In active control of vibration, the ability to actuate the
system in a controlled manner is incorporated into the
structure. Sensors are used to measure the vibrations and
secondary inputs to the system are used to actuate the
flexible body in order to obtain some desired structural
response.
In this thesis, feedback and feedforward control of active
structures are addressed. The thesis is divided into four
parts. The first part contains a general introduction to
the subject of active vibration control and also provides
an outline of the thesis.
The second part of the thesis treats modeling and feedback
control of a beam system with strain sensors and
piezoelectric actuators. Physical modeling and system
identification techniques are utilized in order to obtain a
low order parametric model that is suitable for controller
design.
The third part introduces the concept of a mechanical wave
diode, which is based on feedforward control. A controller
is derived on the basis of equations that describe elastic
waves in bars. The obtained controller is shown to have
poor noise properties and is therefore modified and further
analyzed.
The final part of the thesis treats the same type of wave
diode system as part three, but with more general
feedforward filters. Especially, a controller based on
Wiener filter theory is derived and shown to drastically
improve the performance of the mechanical wave diode.}
}
@PhDThesis{ itlic:2005-004,
author = {Oskar Wibling},
title = {Ad Hoc Routing Protocol Validation},
school = it,
department = docs,
year = 2005,
number = {2005-004},
type = {Licentiate thesis},
month = sep,
day = 23,
abstract = {We explore and evaluate methods for validation of ad hoc
routing protocols which are used to set up forwarding paths
in spontaneous networks of mobile devices. The focus is
automatic formal verification but we also make an initial
account of a protocol performance comparison using
structured live testing. State explosion is a major problem
in algorithmic verification of systems with concurrently
executing components. We comment on some of the best
reduction methods and their applicability to our work. For
reactively operating ad hoc routing protocols we provide a
broadcast abstraction which enables us to prove correct
operation of the Lightweight Underlay Network Ad hoc
Routing protocol (LUNAR) in scenarios of realistic size.
Our live testing effort has thus far uncovered significant
problems in the inter-operation between TCP and ad hoc
routing protocols. Severe performance degradation results
in networks of just a few nodes when very little mobility
is introduced. This indicates that verification for smaller
network sizes is also interesting since those
configurations may be the only useful ones for certain
types of applications.}
}
@PhDThesis{ itlic:2005-003,
author = {Magnus {\AA}gren},
title = {High-Level Modelling and Local Search},
school = it,
year = 2005,
number = {2005-003},
type = {Licentiate thesis},
month = sep,
abstract = {Combinatorial optimisation problems are ubiquitous in our
society and appear in such varied guises as DNA sequencing,
scheduling, configuration, airline-crew and nurse
rostering, combinatorial auctions, vehicle routing, and
financial portfolio design. Their efficient solution is
crucial to many people and has been the target for much
research during the last decades. One successful area of
research for solving such problems is constraint
programming. Yet, current-generation constraint programming
languages are considered by many, especially in industry,
to be too low-level, difficult, and large. In this thesis,
we argue that solver-independent, high-level relational
constraint modelling leads to a simpler and smaller
language, to more concise, intuitive, and analysable
models, as well as to more efficient and effective model
formulation, maintenance, reformulation, and verification.
All this can be achieved without sacrificing the
possibility of efficient solving, so that even time-pressed
modellers can be well assisted. Towards this, we propose
the ESRA relational constraint modelling language, showcase
its elegance on some real-life problems, and outline a
compilation philosophy for such languages.
In order to compile high-level languages such as ESRA to
current generation constraint programming languages, it is
essential that as much support as possible is available in
these languages. This is already the case in the
constructive search area of constraint programming where,
e.g., different kinds of domain variables, such as integer
variables and set variables, and expressive global
constraints are readily available. However, in the local
search area of constraint programming, this is not yet the
case and, until now, set variables were for example not
available. This thesis introduces set variables and set
constraints in the local search area of constraint
programming and, by doing this, considerably improves the
possibilities for using local search. This is true both for
modelling and solving problems using constraint-based local
search, as well as for using it as a possible target for
the compilation of ESRA models. Indeed, many combinatorial
optimisation problems have natural models based on set
variables and set constraints, three of which are
successfully solved in this thesis.
When a new set constraint is introduced in local search,
much effort must be spent on the design and implementation
of an appropriate incremental penalty function for the
constraint. This thesis introduces a scheme that, from a
high-level description of a set constraint in existential
second-order logic with counting, automatically synthesises
an incremental penalty function for that constraint. The
performance of this scheme is demonstrated by solving
real-life instances of a financial portfolio design problem
that seem unsolvable in reasonable time by constructive
search.}
}
@PhDThesis{ itlic:2005-002,
author = {H{\aa}kan Zeffer},
title = {Hardware-Software Tradeoffs in Shared-Memory
Implementations},
school = it,
department = docs,
year = 2005,
number = {2005-002},
type = {Licentiate thesis},
month = may,
abstract = {Shared-memory architectures represent a class of parallel
computer systems commonly used in the commercial and
technical market. While shared-memory servers typically
come in a large variety of configurations and sizes, the
advance in semiconductor technology have set the trend
towards multiple cores per die and multiple threads per
core.
Software-based distributed shared-memory proposals were
given much attention in the 90s. But their promise of short
time to market and low cost could not make up for their
unstable performance. Hence, these systems seldom made it
to the market. However, with the trend towards chip
multiprocessors, multiple hardware threads per core and
increased cost of connecting multiple chips together to
form large-scale machines, software coherence in one form
or another might be a good intra-chip coherence solution.
This thesis shows that data locality, software flexibility
and minimal processor support for read and write coherence
traps can offer good performance, while removing the hard
limit of scalability. Our aggressive fine-grained
software-only distributed shared-memory system exploits key
application properties, such as locality and sharing
patterns, to outperform a hardware-only machine on some
benchmarks. On average, the software system is 11 percent
slower than the hardware system when run on identical node
and interconnect hardware. A detailed full-system
simulation study of dual core CMPs, with multiple hardware
threads per core and minimal processor support for
coherence traps is on average one percent slower than its
hardware-only counter part when some flexibility is taken
into account. Finally, a functional full-system simulation
study of an adaptive coherence-batching scheme shows that
the number of coherence misses can be reduced with up to 60
percent and bandwidth consumption reduced with up to 22
percent for both commercial and scientific applications.}
}
@PhDThesis{ itlic:2005-001,
author = {Jesper Wilhelmsson},
title = {Efficient Memory Management for Message-Passing
Concurrency --- part {I}: Single-threaded execution},
school = it,
department = csd,
year = 2005,
number = {2005-001},
type = {Licentiate thesis},
month = may,
abstract = {Manual memory management is error prone. Some of the
errors it causes, in particular memory leaks and dangling
pointers, are hard to find. Manual memory management
becomes even harder when concurrency enters the picture. It
therefore gets more and more important to overcome the
problems of manual memory management in concurrent software
as the interest in these applications increases with the
development of new, multi-threaded, hardware.
To ease the implementation of concurrent software many
programming languages these days come with automatic memory
management and support for concurrency. This support,
called the concurrency model of the language, comes in many
flavors (shared data structures, message passing, etc.).
The performance and scalability of applications implemented
using such programming languages depends on the concurrency
model, the memory architecture, and the memory manager used
by the language. It is therefore important to investigate
how different memory architectures and memory management
schemes affect the implementation of concurrent software
and what performance tradeoffs are involved.
This thesis investigates ways of efficiently implementing
the memory architecture and memory manager in a concurrent
programming language. The concurrency model which we
explore is based on message passing with copying semantics.
We start by presenting the implementation of three
different memory architectures for concurrent software and
give a detailed characterization of their pros and cons
regarding message passing and efficiency in terms of memory
management. The issue examined is whether to use private
memory for all processes in a program or if there may be
benefits in sharing all or parts of the memory. In one of
the architectures looked at, called hybrid, a static
analysis called message analysis is used to guide the
allocation of message data.
Because the hybrid architecture is the enabling technology
for a scalable multi-threaded implementation, we then focus
on the hybrid architecture and investigate how to manage
the memory using different garbage collection techniques.
We present pros and cons of these techniques and discuss
their characteristics and their performance in concurrent
applications. Finally our experiences from turning the
garbage collector incremental are presented. The
effectiveness of the incremental collector is compared to
the non-incremental version. On a wide range of benchmarks,
the incremental collector we present is able to sustain
high mutator utilization (about 80\% during collection
cycles) at a low cost.
This work is implemented in an industrial-strength
implementation of the concurrent functional programming
language Erlang. Our eventual goal is to use the hybrid
architecture and the incremental garbage collector as the
basis for an efficient multi-threaded implementation of
Erlang. The work described in this thesis is a big step in
that direction.}
}
@PhDThesis{ itlic:2004-006,
author = {Stefan Johansson},
title = {High Order Difference Approximations for the Linearized
Euler Equations},
school = it,
department = tdb,
year = 2004,
number = {2004-006},
type = {Licentiate thesis},
month = dec,
abstract = {The computers of today make it possible to do direct
simulation of aeroacoustics, which is very computationally
demanding since a very high resolution is needed.
In the present thesis we study issues of relevance for
aeroacoustic simulations. Paper A considers standard high
order difference methods. We study two different ways to
apply boundary conditions in a stable way. Numerical
experiments are done for the 1D linearized Euler equations.
In paper B we develop difference methods which give smaller
dispersion errors than standard central difference methods.
The new methods are applied to the 1D wave equation.
Finally in Paper C we apply the new difference methods to
aeroacoustic simulations based on the 2D linearized Euler
equations.
Taken together, the methods presented here lead to better
approximation of the wave number, which in turn results in
a smaller L2-error than obtained by previous methods found
in the literature. The results are valid when the problem
is not fully resolved, which usually is the case for large
scale applications.}
}
@PhDThesis{ itlic:2004-005,
author = {Henrik L{\"o}f},
title = {Parallelizing the Method of Conjugate Gradients for Shared
Memory Architectures},
school = it,
year = 2004,
number = {2004-005},
type = {Licentiate thesis},
month = nov,
abstract = {Solving Partial Differential Equations (PDEs) is an
important problem in many fields of science and
engineering. For most real-world problems modeled by PDEs,
we can only approximate the solution using numerical
methods. Many of these numerical methods result in very
large systems of linear equations. A common way of solving
these systems is to use an iterative solver such as the
method of conjugate gradients. Furthermore, due to the size
of these systems we often need parallel computers to be
able to solve them in a reasonable amount of time.
Shared memory architectures represent a class of parallel
computer systems commonly used both in commercial
applications and in scientific computing. To be able to
provide cost-efficient computing solutions, shared memory
architectures come in a large variety of configurations and
sizes. From a programming point of view, we do not want to
spend a lot of effort optimizing an application for a
specific computer architecture. We want to find methods and
principles of optimizing our programs that are generally
applicable to a large class of architectures.
In this thesis, we investigate how to implement the method
of conjugate gradients efficiently on shared memory
architectures. We seek algorithmic optimizations that
result in efficient programs for a variety of
architectures. To study this problem, we have implemented
the method of conjugate gradients using OpenMP and we have
measured the runtime performance of this solver on a
variety of both uniform and non-uniform shared memory
architectures. The input data used in the experiments come
from a Finite-Element discretization of the Maxwell
equations in three dimensions of a fighter-jet geometry.
Our results show that, for all architectures studied,
optimizations targeting the memory hierarchy exhibited the
largest performance increase. Improving the load balance,
by balancing the arithmetical work and minimizing the
number of global barriers showed to be of lesser
importance. Overall, \emph{bandwidth minimization} of the
iteration matrix showed to be the most efficient
optimization.
On non-uniform architectures, proper data distribution
showed to be very important. In our experiments we used
page migration to improve the data distribution during
runtime. Our results indicate that page migration can be
very efficient if we can keep the migration cost low.
Furthermore, we believe that page migration can be
introduced in a portable way into OpenMP in the form of a
directive with an \emph{affinity-on-next-touch} semantic.}
}
@PhDThesis{ itlic:2004-004,
author = {El Shobaki, Mohammed},
title = {On-Chip Monitoring for Non-Intrusive Hardware/Software
Observability},
school = it,
year = 2004,
number = {2004-004},
type = {Licentiate thesis},
month = sep,
abstract = {The increased complexity in today's state-of-the-art
computer systems makes them hard to analyse, test, and
debug. Moreover, the advances in hardware technology give
system designers enormous possibilities to explore hardware
as a means to implement performance demanding
functionality. We see examples of this trend in novel
microprocessors, and Systems-on-Chip, that comprise
reconfigurable logic allowing for hardware/software
co-design. To succeed in developing computer systems based
on these premises, it is paramount to have efficient design
tools and methods.
An important aspect in the development process is
observability, i.e., the ability to observe the system's
behaviour at various levels of detail. These observations
are required for many applications: when looking for design
errors, during debugging, during performance assessments
and fine-tuning of algorithms, for extraction of design
data, and a lot more. In real-time systems, and computers
that allow for concurrent process execution, the
observability must be obtained without compromising the
system's functional and timing behaviour.
In this thesis we propose a monitoring system that can be
used for nonintrusive run-time observations of real-time
and concurrent computer systems. The monitoring system,
designated Multipurpose/Multiprocessor Application Monitor
(MAMon), is based on a hardware probe unit (IPU) which is
integrated with the observed system's hardware. The IPU
collects process-level events from a hardware Real-Time
Kernel (RTK), without perturbing the system, and transfers
the events to an external computer for analysis, debugging,
and visualisation. Moreover, the MAMon concept also
features hybrid monitoring for collection of more
fine-grained information, such as program instructions and
data flows.
We describe MAMon's architecture, the implementation of two
hardware prototypes, and validation of the prototypes in
different case-studies. The main conclusion is that process
level events can be traced non-intrusively by integrating
the IPU with a hardware RTK. Furthermore, the IPU's small
footprint makes it attractive for SoC designs, as it
provides increased system observability at a low hardware
cost. }
}
@PhDThesis{ itlic:2004-003,
author = {Yngve Sel{\'e}n},
title = {Model Selection},
school = it,
year = {2004},
number = {2004-003},
type = {Licentiate thesis},
month = oct,
abstract = {Before using a parametric model one has to be sure that it
offers a reasonable description of the system to be
modeled. If a bad model structure is employed, the obtained
model will also be bad, no matter how good is the parameter
estimation method. There exist many possible ways of
validating candidate models. This thesis focuses on one of
the most common ways, i.e., the use of information
criteria. First, some common information criteria are
presented, and in the later chapters, various extensions
and implementations are shown. An important extension,
which is advocated in the thesis, is the multi-model (or
model averaging) approach to model selection. This
multi-model approach consists of forming a weighted sum of
several candidate models, which then can be used for
inference.}
}
@PhDThesis{ itlic:2004-002,
author = {Markus Nord{\'e}n},
title = {Parallel {PDE} Solvers on cc-{NUMA} Systems},
school = it,
department = tdb,
year = 2004,
number = {2004-002},
type = {Licentiate thesis},
month = mar,
note = {Included papers are available at: Paper A:
\url{http://www.sciencedirect.com/science/article/B6V06-49W6S4M-1/2/23cb25585b2742595f319f4cedd0b65f},
Paper C:
\url{http://www.it.uu.se/research/publications/lic/2004-002/2004-002-C.pdf},
Paper D:
\url{http://www.it.uu.se/research/publications/reports/2004-006}}
,
abstract = {The current trend in parallel computers is that systems
with a large shared memory are becoming more and more
popular. A shared memory system can be either a uniform
memory architecture (UMA) or a cache coherent non-uniform
memory architecture (cc-NUMA).
In the present thesis, the performance of parallel PDE
solvers on cc-NUMA computers is studied. In particular, we
consider the shared namespace programming model,
represented by OpenMP. Since the main memory is physically,
or \emph{geographically} distributed over several
multi-processor nodes, the latency for local memory
accesses is smaller than for remote accesses. Therefore,
the \emph{geographical locality} of the data becomes
important.
The questions posed in this thesis are: (1)~How large is
the influence on performance of the non-uniformity of the
memory system? (2)~How should a program be written in order
to reduce this influence? (3)~Is it possible to introduce
optimizations in the computer system for this purpose?
Most of the application codes studied address the Euler
equations using a finite difference method and a finite
volume method respectively and are parallelized with
OpenMP. Comparisons are made with an alternative
implementation using MPI and with PDE solvers implemented
with OpenMP that solve other equations using different
numerical methods.
The main conclusion is that geographical locality is
important for performance on cc-NUMA systems. This can be
achieved through self optimization provided in the system
or through migrate-on-next-touch directives that could be
inserted automatically by the compiler.
We also conclude that OpenMP is competitive with MPI on
cc-NUMA systems if care is taken to get a favourable data
distribution. }
}
@PhDThesis{ itlic:2004-001,
author = {Niclas Sandgren},
title = {Parametric Methods for Frequency-Selective {MR}
Spectroscopy},
school = it,
department = syscon,
year = 2004,
number = {2004-001},
type = {Licentiate thesis},
month = mar,
abstract = {Accurate and efficient quantitation of the data components
in magnetic resonance spectroscopy (MRS) signals can be an
important step in the analysis of biochemical substances.
In several practical applications of MR spectroscopy the
user is interested only in the data components lying in a
small frequency band of the spectrum. A
frequency-selective, or sub-band, analysis deals precisely
with this kind of spectroscopy: estimation of only those
spectroscopic components that lie in a selected frequency
band of the (MR) data spectrum, with as little interference
as possible from the out-of-band components and in a
computationally efficient way. One obvious application for
such a sub-band technique is to lower the computational
burden in situations when the measured data sequence
contains too many samples to be processed using a standard
full-spectrum method. This thesis deals with several
parametric methods to perform a frequency-selective data
analysis. In addition, the possibility to incorporate prior
knowledge about some of the components in the data is
considered, a procedure that generally increases the
parameter estimation performance significantly. A data
model of exponentially damped sinusoids is assumed for the
presented methods, which are applied to both simulated and
experimental (in-vitro) MR data.
Keywords: frequency-selective spectroscopy;
frequency-domain data processing; sub-band analysis;
SVD-based parameter estimation; damped sinusoidal model.}
}
@PhDThesis{ itlic:2003-015,
author = {Erik Berg},
title = {Methods for Run Time Analysis of Data Locality},
school = it,
department = docs,
year = 2003,
number = {2003-015},
type = {Licentiate thesis},
month = dec,
abstract = {The growing gap between processor clock speed and DRAM
access time puts new demands on software and development
tools. Deep memory hierarchies and high cache miss
penalties in present and emerging computer systems make
execution time sensitive to data locality. Therefore,
developers of performance-critical applications and
optimizing compilers must be aware of data locality and
maximize cache utilization to produce fast code. To aid the
optimization process and help understanding data locality,
we need methods to analyze programs and pinpoint poor cache
utilization and possible optimization opportunities.
Current methods for run-time analysis of data locality and
cache behavior include functional cache simulation, often
combined with set sampling or time sampling, other
regularity metrics based on strides and data streams, and
hardware monitoring. However, they all share the trade-off
between run-time overhead, accuracy and explanatory power.
This thesis presents methods to efficiently analyze data
locality at run time based on cache modeling. It suggests
source-interdependence profiling as a technique for
examining the cache behavior of applications and locating
source code statements and/or data structures that cause
poor cache utilization. The thesis also introduces a novel
statistical cache-modeling technique, StatCache. Rather
than implementing a functional cache simulator, StatCache
estimates the miss ratios of fully-associative caches using
probability theory. A major advantage of the method is that
the miss ratio estimates can be based on very sparse
sampling. Further, a single run of an application is enough
to estimate the miss ratio of caches of arbitrary sizes and
line sizes and to study both spatial and temporal data
locality.}
}
@PhDThesis{ itlic:2003-014,
author = {Kajsa Ljungberg},
title = {Numerical Methods for Mapping of Multiple {QTL}},
school = it,
department = tdb,
year = 2003,
number = {2003-014},
type = {Licentiate thesis},
month = nov,
abstract = {This thesis concerns numerical methods for mapping of
multiple quantitative trait loci, QTL. Interactions between
multiple genetic loci influencing important traits, such as
growth rate in farm animals and predisposition to cancer in
humans, make it necessary to search for several QTL
simultaneously. Simultaneous search for $n$ QTL involves
solving an $n$-dimensional global optimization problem,
where each evaluation of the objective function consists of
solving a generalized least squares problem. In Paper A we
present efficient algorithms, mainly based on updated QR
factorizations, for evaluating the objective functions of
different parametric QTL mapping methods. One of these
algorithms reduces the computational work required for an
important function class by one order of magnitude compared
with the best of the methods used by other authors. In
Paper B previously utilized techniques for finding the
global optimum of the objective function are compared with
a new approach based on the DIRECT algorithm of Jones et
al. The new method gives accurate results in one order of
magnitude less time than the best of the formerly employed
algorithms. Using the algorithms presented in Papers A and
B, simultaneous search for at least three QTL, including
computation of the relevant empirical significance
thresholds, can be performed routinely.}
}
@PhDThesis{ itlic:2003-013,
author = {Stina Nylander},
title = {The Ubiquitous Interactor -- Mobile Services with Multiple
User Interfaces},
school = it,
department = hci,
year = 2003,
number = {2003-013},
type = {Licentiate thesis},
abstract = {This licentiate thesis addresses design and development
problems that arise when service providers, and service
end-users face the variety of computing devices available
on the market. The devices are designed for many types of
use in various situations and settings, which means that
they have different capabilities in terms of presentation,
interaction, memory, etc. Service providers often handle
these differences by creating a new version for each
device. This creates a lot of development and maintenance
work, and often leads to restrictions on the set of devices
that services are developed for. For service end-users,
this means that it can be difficult to combine devices that
fit the intended usage context and services that provide
the needed content. New development methods that target
multiple devices from the start are needed. The differences
between devices call for services that can adapt to various
devices, and present themselves with device specific user
interfaces.
We propose a way of developing device independent services
by using interaction acts to describe user-service
interaction. Devices would interpret the interaction acts
and generate user interfaces according to their own
specific capabilities. Additional presentation information
can be encoded in customization forms, to further control
how the user interface would be generated. Different
devices would generate different user interfaces from the
same interaction acts, and a device could generate
different user interfaces from the same interaction acts
combined with different customization forms.
In this thesis, the interaction act and customization form
concepts are described in detail. A system prototype
handling them and two sample services have been
implemented. Preliminary evaluations indicate that
interaction acts and customization forms constitute a
feasible approach for developing services with multiple
user interfaces. The thesis concludes with a discussion of
the problems arising when evaluating this kind of systems,
and some conclusions on how to continue the evaluation
process.}
}
@PhDThesis{ itlic:2003-012,
author = {Olivier Amoignon},
title = {Adjoint-Based Aerodynamic Shape Optimization},
school = it,
department = tdb,
year = 2003,
number = {2003-012},
type = {Licentiate thesis},
month = oct,
note = {In the first printed version, many references to the
bibliography are off by one. The online version does not
have this error},
abstract = {An adjoint system of the Euler equations of gas dynamics
is derived in order to solve aerodynamic shape optimization
problems with gradient-based methods. The derivation is
based on the fully discrete flow model and involves
differentiation and transposition of the system of
equations obtained by an unstructured and node-centered
finite-volume discretization. Solving the adjoint equations
allows an efficient calculation of gradients, also when the
subject of optimization is described by hundreds or
thousands of design parameters.
Such a fine geometry description may cause wavy or
otherwise irregular designs during the optimization
process. Using the one-to-one mapping defined by a Poisson
problem is a known technique that produces smooth design
updates while keeping a fine resolution of the geometry.
This technique is extended here to combine the smoothing
effect with constraints on the geometry, by defining the
design updates as solutions of a quadratic programming
problem associated with the Poisson problem.
These methods are applied to airfoil shape optimization for
reduction of the wave drag, that is, the drag caused by gas
dynamic effects that occur close to the speed of sound. A
second application concerns airfoil design optimization to
delay the laminar-to-turbulent transition point in the
boundary layer in order to reduce the drag. The latter
application has been performed by the author with
collaborators, also using gradient-based optimization.
Here, the growth of convectively unstable disturbances is
modeled by successively solving the Euler equations, the
boundary layer equations, and the parabolized stability
equations.}
}
@PhDThesis{ itlic:2003-011,
author = {Tobias Amnell},
title = {Code Synthesis for Timed Automata},
school = it,
department = docs,
year = 2003,
number = {2003-011},
type = {Licentiate thesis},
month = oct,
abstract = {In this thesis, we study executable behaviours of timed
models. The focus is on synthesis of executable code with
predictable behaviours from high level abstract models. We
assume that a timed system consists of two parts: the
control software and the plant (i.e. the environment to be
controlled). Both are modelled as timed automata extended
with real time tasks. We consider the extended timed
automata as design models.
We present a compilation procedure to transform design
models to executable code including a run-time scheduler
(run time system) preserving the correctness and
schedulability of the models. The compilation procedure has
been implemented in a prototype C-code generator for the
brickOS operating system included in the Times tool. We
also present an animator, based on hybrid automata, to be
used to describe a simulated environment (i.e. the plant)
for timed systems. The tasks of the hybrid automata define
differential equations and the animator uses a differential
equations solver to calculate step-wise approximations of
real valued variables. The animated objects, described as
hybrid automata, are compiled by the Times tool into
executable code using a similar procedure as for controller
software.
To demonstrate the applicability of timed automata with
tasks as a design language we have developed the control
software for a production cell. The production cell is
built in LEGO and is controlled by a Hitachi H8 based
LEGO-Mindstorms control brick. The control software has
been analysed (using the Times tool) for schedulability and
other safety properties. Using results from the analysis we
were able to avoid generating code for parts of the design
that could never be reached, and could also limit the
amount of memory allocated for the task queue.}
}
@PhDThesis{ itlic:2003-010,
author = {Dan Wallin},
title = {Exploiting Data Locality in Adaptive Architectures},
school = it,
department = docs,
year = 2003,
number = {2003-010},
type = {Licentiate thesis},
month = sep,
abstract = {The speed of processors increases much faster than the
memory access time. This makes memory accesses expensive.
To meet this problem, cache hierarchies are introduced to
serve the processor with data. However, the effectiveness
of caches depends on the amount of locality in the
application's memory access pattern. The behavior of
various programs differs greatly in terms of cache miss
characteristics, access patterns and communication
intensity. Therefore a computer built for many different
computational tasks potentially benefits from dynamically
adapting to the varying needs of the applications.
This thesis shows that a cc-NUMA multiprocessor with data
migration and replication optimizations efficiently
exploits the temporal locality of algorithms. The
performance of the self-optimizing system is similar to a
system with a perfect initial thread and data placement.
Data locality optimizations are not for free. Large cache
line coherence protocols improve spatial locality but yield
increases in false sharing misses for many applications.
Prefetching techniques that reduce the cache misses often
lead to increased address and data traffic. Several
techniques introduced in this thesis efficiently avoid
these drawbacks. The bundling technique reduces the
coherence traffic in multiprocessor prefetchers. This is
especially important in snoop-based systems where the
coherence bandwidth is a scarce resource. Bundled
prefetchers manage to reduce both the cache miss rate and
the coherence traffic compared with non-prefetching
protocols. The most efficient bundled prefetching protocol
studied, lowers the cache misses by 27 percent and the
address snoops by 24 percent relative to a non-prefetching
protocol on average for all examined applications. Another
proposed technique, capacity prefetching, avoids false
sharing misses by distinguishing between cache lines
involved in communication from non-communicating cache
lines at run-time.}
}
@PhDThesis{ itlic:2003-009,
author = {Martin Karlsson},
title = {Cache Memory Design Trade-offs for Current and Emerging
Workloads},
school = it,
department = docs,
year = 2003,
number = {2003-009},
type = {Licentiate thesis},
month = sep,
abstract = {The memory system is the key to performance in
contemporary computer systems. When designing a new memory
system, architectural decisions are often arbitrated based
on their expected performance effect. It is therefore very
important to make performance estimates based on workloads
that accurately reflect the future use of the system. This
thesis presents the first memory system characterization
study of Java-based middleware, which is an emerging
workload likely to be an important design consideration for
next generation processors and servers.
Manufacturing technology has reached a point where it is
now possible to fit multiple full-scale processors and
integrate board-level features on a chip. The raised
competition for chip resources has increased the need to
design more effective caches without trading off area or
power. Two common ways to improve cache performance are to
increase the size or associativity of the cache. Both of
these approaches come at a high cost in chip area as well
as power.
This thesis presents two new cache organizations, each
aimed at more efficient use of either power or area. First,
the Elbow cache is presented, which is shown to be a
power-efficient alternative to highly set-associative
caches. Secondly, a selective cache allocation algorithm is
presented, RASCAL, that significantly reduces the miss
ratio at a limited cost in area.}
}
@PhDThesis{ itlic:2003-008,
author = {Zoran Radovic},
title = {Efficient Synchronization and Coherence for Nonuniform
Communication Architectures},
school = it,
department = docs,
year = 2003,
number = {2003-008},
type = {Licentiate thesis},
month = sep,
abstract = {Nonuniformity is a common characteristic of contemporary
computer systems, mainly because of physical distances in
computer designs. In large multiprocessors, the access to
shared memory is often nonuniform, and may vary as much as
ten times for some nonuniform memory access (NUMA)
architectures, depending on if the memory is close to the
requesting processor or not. Much research has been devoted
to optimizing such systems.
This thesis identifies another important property of
computer designs, nonuniform communication architecture
(NUCA). High-end hardware-coherent machines built from a
few large nodes or from chip multiprocessors, are typical
NUCA systems that have a lower penalty for reading recently
written data from a neighbor's cache than from a remote
cache. The first part of the thesis identifies node
affinity as an important property for scalable
general-purpose locks. Several software-based hierarchical
lock implementations that exploit NUCAs are presented and
investigated. This type of lock is shown to be almost twice
as fast for contended locks compared with other
software-based lock implementations, without introducing
significant overhead for uncontested locks.
Physical distance in very large systems also limits
hardware coherence to a subsection of the system. Software
implementations of distributed shared memory (DSM) are
cost-effective solutions that extend the upper scalability
limit of such machines by providing the ``illusion'' of
shared memory across the entire system. This also creates
NUCAs with even larger local-remote penalties, since the
coherence is maintained entirely in software.
The major source of inefficiency for traditional software
DSM implementations comes from the cost of interrupt-based
asynchronous protocol processing, not from the actual
network latency. As the raw hardware latency of internode
communication decreases, the asynchronous overhead in the
communication becomes more dominant. This thesis introduces
the DSZOOM system that removes this type of overhead by
running the entire coherence protocol in the requesting
processor.}
}
@PhDThesis{ itlic:2003-007,
author = {Maria Karlsson},
title = {Market Based Programming and Resource Allocation},
school = it,
department = csd,
year = 2003,
number = {2003-007},
type = {Licentiate thesis},
month = jun,
abstract = {The subject of this thesis is the concept of
market-oriented programming and market protocols. We want
to solve an allocation problem where some resources are to
be divided among a number of agents. Each agent has a
utility function telling how much the current allocation is
worth for it. The goal is to allocate the resources among
the agents in a way that maximizes the sum of the utilities
of all agents. To solve this problem we use the concept of
markets to create mechanisms for computational
implementation.
To achieve the advantages of market oriented programming,
we have to consider the conceptual view of the problem a
main design issue. We want to investigate the possibilities
to build computationally effective mechanisms which
maintain the intuitive, easy-to-understand structure of
market-based approaches. In the first paper we look at two
examples from the literature and show that conceptual
improvements of the approaches will make agent behavior
more realistic. This will also make the examples fit into a
more general theory. In the second paper we create a market
mechanism for handling combinatorial markets. The mechanism
includes an auction, where each iteration runs in
polynomial time. The mechanism shows good performance when
the number of resources is relatively small compared to the
number of agents. }
}
@PhDThesis{ itlic:2003-006,
author = {Tomas Olsson},
title = {Bootstrapping and Decentralizing Recommender Systems},
school = it,
department = csd,
year = 2003,
number = {2003-006},
type = {Licentiate thesis},
month = jun,
abstract = {This thesis consists of three papers on recommender
systems.
The first paper addresses the problem of making
decentralized recommendations using a peer-to-peer
architecture. Collaborating recommender agents are
connected into a network of neighbors that exchange user
recommendations to find new items to recommend. We achieved
a performance comparable to a centralized system.
The second paper deals with the bootstrapping problem for
centralized recommender systems. Most recommender systems
learn from experience but initially there are no users and
no rated items to learn from. To deal with this problem we
have developed the Genesis method. The method bootstraps a
recommender system with artificial user profiles sampled
from a probabilistic model built from prior knowledge. The
paper describes how this was done for a k-nearest neighbor
recommender algorithm in the movie domain. We were able to
improve the performance of a k-nearest neighbor algorithm
for single predictions but not for rank ordered lists of
recommendations.
The third paper identifies a new domain for recommendations
-- product configuration -- where new recommender
algorithms are needed. A system that helps users
configuring their own PCs is described. Recommendations and
a cluster-based help system together with a rule-based
configurator assist the users in selecting appropriate
features or complete PC configurations. The configurator
ensures that users cannot choose incompatible components
while the recommender system adds social information based
on what other users have chosen. This introduces new
complexity in the recommendation process on how to combine
the recommendations from the configurator and the
recommender system. The paper proposes (1) three new
recommender algorithms on how to make recommendations in
the domain of product configuration, (2) a method for
adding social recommendations to a rule-based configurator
and (3) a method for applying the Genesis method in this
domain. In this case the Genesis method is implemented by a
Bayesian belief net that captures the designers' prior
knowledge on how to configure PCs. Then instances of
complete configurations are sampled from the model and
added to the recommender algorithm. }
}
@PhDThesis{ itlic:2003-005,
author = {Mats Ekman},
title = {Urban Water Management -- Modelling, Simulation and Control
of the Activated Sludge Process},
school = it,
department = syscon,
year = 2003,
number = {2003-005},
type = {Licentiate thesis},
month = may,
note = {Errata (updated Jan 2004) available at
\url{http://www.it.uu.se/research/publications/lic/2003-005/Errata.pdf}
and
\url{http://www.it.uu.se/research/publications/lic/2003-005/Errata.ps.gz}},
abstract = {During the last few decades, wastewater treatment
processes in urban water management have become more and
more efficient and complex. Several factors such as
urbanization, stricter legislations on effluent quality,
economics, increased knowledge of the involved biological,
chemical and physical processes as well as technical
achievements have been important incentives for the
development of more efficient procedures for wastewater
treatment plants. Future requirements on more sustainable
urban water systems, in combination with increasing
wastewater loads, will most probably further increase the
need for optimization and control of both existing and
emerging wastewater treatment processes.
In this thesis estimation, modelling and control strategies
are developed in order to improve the efficiency of the
activated sludge process.
The first part of the thesis presents a Java-based
simulator of the activated sludge process. An overview of
its features, with some emphasis on implemented control
strategies, is given. In particular, a new control strategy
for the internal recycling flow rate is described. A
summary of experiences from using the simulator as a
teaching and training tool is also given.
The second part of the thesis includes a derivation of
reduced order models for the activated sludge process. The
resulting models, a time-varying linear state-space model
for the anoxic part and a time-varying bilinear state-space
model for the aerobic part, are intended to be used for
control applications.
In the third part, an adaptive control strategy for control
of the nitrate level using an external carbon source is
presented. The controller adapts and compensates for
changes in the system dynamics since important system
parameters are estimated adaptively and incorporated
on-line in the control design. The estimated system
parameters and model states also give guidance about the
state of the process and the characteristics of the
wastewater. Several simulation examples are used to
illustrate the control law.
Finally, a new suboptimal control law for the bilinear
quadratic regulator problem with infinite final time is
presented. The control strategy is evaluated in a
simulation study, where special concern is devoted to
controlling the activated sludge process, using the
bilinear model developed in the second part of this thesis.
}
}
@PhDThesis{ itlic:2003-004,
author = {Malin Ljungberg},
title = {Handling of Curvilinear Coordinates in a {PDE} Solver
Framework},
school = it,
department = tdb,
year = 2003,
number = {2003-004},
type = {Licentiate thesis},
month = may,
abstract = {By the use of object-oriented analysis and design combined
with variability modeling a highly flexible software model
for the metrics handling functionality of a PDE solver
framework was obtained. This new model was evaluated in
terms of usability, particularly with respect to efficiency
and flexibility. The efficiency of a pilot implementation
is similar to, or even higher than, that of a pre-existing
application-specific reference code. With regards to
flexibility it is shown that the new software model
performs well for a set of four change scenarios selected
by an expert user group.}
}
@PhDThesis{ itlic:2003-003,
author = {Inger Boivie},
title = {Usability and Users' Health Issues in Systems
Development},
school = it,
department = hci,
year = 2003,
number = {2003-003},
type = {Licentiate thesis},
month = mar,
abstract = {The figures of reported health problems in
computer-supported administrative work are alarmingly
high and increasing. The main health problems are visual
discomfort, repetitive strain injuries (RSI) and
stress-related disorders. Some important risk factors are
poor workstation design, constrained work postures,
repetitive work and long hours of computer use every day.
Others are high demands, poor control over workload and
work pace and poor relations to management and colleagues.
There is also evidence that poor design and usability of
the computer systems as well as technical problems with the
computer add to the pressure perceived by the user, which
may in its turn cause stress-related disorders.
Systems (software) development is often technology-driven
and the design and contents of the resulting system shapes
the work situation, including factors affecting the users'
health and well-being. There are numerous examples in the
literature describing how poorly designed systems fail to
support the real work practices, introducing new ones that
are inadequate and more time-consuming. Thus these,
supposedly supporting, computer systems get in the way of
efficient and effective work, adding a burden on the
workers rather than helping them out.
This thesis tries to describe some of the relations between
the systems development process and users' health
complaints, in a work context. I also discuss whether or
not the concepts of usability and user experience can be
used to address users' health issues in the systems
development process. The main results indicate that
although usability must be addressed, it is not sufficient.
Occupational health issues must be explicitly integrated in
systems development, and be given priority. This thesis
also describes some potential methods and techniques for
doing that. }
}
@PhDThesis{ itlic:2003-002,
author = {Jenny Persson},
title = {Basic Values in Software Development and Organizational
Change},
school = it,
department = hci,
year = 2003,
number = {2003-002},
type = {Licentiate thesis},
month = mar,
abstract = {This thesis is to be seen as a preliminary version of my
doctoral thesis. It deals with problems concerning how our
basic values influence development processes, mainly within
the areas of software development and organizational
change. Two main foci are presented, one theoretical and
one empirical. The theoretical focus deals with the
question: Is there objectivity in the theoretical
framework, the knowledge and the experience on which the
choices of strategies are based? The empirical focus deals
with the question: How can knowledge about healthy work,
usability and organizational matters be put into practice
in the organizational and software development process? The
preliminary conclusion shows that, when put under pressure,
basic values are often clearly evident, though not
necessarily evident to the individual herself. The will to
grasp at models or methods that follow contemporary trends
increases as well. In the end, time and money control the
process, and all the magnificent ideas of a system built
for a better work environment have soon faded away. This
loss reflects the basic values in the organization, as well
as it reflects a lack of awareness of the consequences of
different strategic decisions.
Key Words: Basic Values, Management, Organizational Change,
Metaphors, Organizational Learning, Systems Development,
Computer Ethics, Occupational Health, Work Environment,
Usability}
}
@PhDThesis{ itlic:2003-001,
author = {Per Sundqvist},
title = {Preconditioners and Fundamental Solutions},
school = it,
department = tdb,
year = 2003,
number = {2003-001},
type = {Licentiate thesis},
month = mar,
abstract = {New preconditioning techniques for the iterative solution
of systems of equations arising from discretizations of
partial differential equations are considered. Fundamental
solutions, both of differential and difference operators,
are used as kernels in discrete, truncated convolution
operators. The intention is to approximate inverses of
difference operators that arise when discretizing the
differential equations. The approximate inverses are used
as preconditioners.
The technique using fundamental solutions of differential
operators is applied to scalar problems in two dimensions,
and grid independent convergence is obtained for a first
order differential equation.
The problem of computing fundamental solutions of
difference operators is considered, and we propose a new
algorithm. It can be used also when the symbol of the
difference operator is not invertible everywhere, and it is
applicable in two or more dimensions.
Fundamental solutions of difference operators are used to
construct preconditioners for non-linear systems of
difference equations in two dimensions. Grid independent
convergence is observed for two standard finite difference
discretizations of the Euler equations in a
non-axisymmetric duct.}
}
@PhDThesis{ itlic:2002-008,
author = {Henrik Lundgren},
title = {Implementation and Real-world Evaluation of Routing
Protocols for Wireless Ad hoc Networks},
school = it,
year = 2002,
number = {2002-008},
type = {Licentiate thesis},
month = nov,
abstract = {A wireless ad hoc network consists of a number of mobile
nodes that temporarily form a dynamic infrastructure-less
network. To enable communication between nodes that do not
have direct radio contact, each node must function as a
wireless router and potentially forward data traffic on
behalf of the others. New routing protocols are needed as
the existing protocols used in wired networks adapt too
slowly to the frequent topology changes induced by mobility
and are inefficient in terms of resource consumption.
During the last five to ten years more than 60 different ad
hoc routing protocols have been proposed, optimized and
partially compared based on simulation studies. However, we
believe that these simulation studies need to be
complemented with real-world experiments in order to gain
practical experience and reveal real-world effects that may
not be visible in simulations.
This thesis surveys the field of ad hoc routing and related
real-world testbeds. We also describe our research results
from three included papers:
In our active networking approach to ad hoc routing,
protocol logic is carried inside data packets, which
enables new protocols to be independently deployed at
run-time. As a proof of concept, we have implemented two ad
hoc routing protocols and successfully used them for
routing a real-time sensitive audio stream. This prototype
gave us a good understanding of the practical aspects of
wireless networking and prepared good ground for protocol
comparisons.
We have implemented a testbed called APE which enables easy
management and analysis of real-world experiments. In
real-world experiments, with up to 37 mobile nodes, we used
the APE testbed to assess the reproducibility between
repeated testruns and compared three different routing
protocols. This testbed has become a crucial tool for
discovering flaws in proposed protocols, as well as for
benchmarking different routing styles.
During our implementation of the AODV protocol we
discovered a performance problem that we could relate to
some invalid assumptions about the wireless link
characteristics. We explored this `communication gray zone'
problem and implemented three countermeasures which we
compared by using the APE testbed. As a result we could
eliminate the performance discrepancy between simulated
AODV and its implementation in the real-world.}
}
@PhDThesis{ itlic:2002-007,
author = {Samuel Sundberg},
title = {Semi-Toeplitz Preconditioning for Linearized Boundary
Layer Problems},
school = it,
department = tdb,
year = 2002,
number = {2002-007},
type = {Licentiate thesis},
month = nov,
note = {Included papers available at
\url{http://www.it.uu.se/research/publications/lic/2002-007/2002-007-paperA.pdf},
\url{http://www.it.uu.se/research/publications/lic/2002-007/2002-007-paperA.ps.gz},
\url{http://www.it.uu.se/research/publications/lic/2002-007/2002-007-paperB.ps.gz},
and
\url{http://www.it.uu.se/research/publications/lic/2002-007/2002-007-paperB.pdf}}
,
abstract = {We have defined and analyzed a semi-Toeplitz
preconditioner for time-dependent and steady-state
convection-diffusion problems. Analytic expressions for the
eigenvalues of the preconditioned systems are obtained. An
asymptotic analysis shows that the eigenvalue spectrum of
the time-dependent problem is reduced to two eigenvalues
when the number of grid points goes to infinity. The
numerical experiments sustain the results of the
theoretical analysis, and the preconditioner exhibits a
robust behavior for stretched grids.
A semi-Toeplitz preconditioner for the linearized
Navier--Stokes equations for compressible flow is proposed
and tested. The preconditioner is applied to the linear
system of equations to be solved in each time step of an
implicit method. The equations are solved with flat plate
boundary conditions and are linearized around the Blasius
solution. The grids are stretched in the normal direction
to the plate and the quotient between the time step and the
space step is varied. The preconditioner works well in all
tested cases and outperforms the method without
preconditioning both in number of iterations and execution
time.}
}
@PhDThesis{ itlic:2002-006,
author = {Kalyani Munasinghe},
title = {On Using Mobile Agents for Load Balancing in High
Performance Computing},
school = it,
department = tdb,
year = 2002,
number = {2002-006},
type = {Licentiate thesis},
month = jun,
abstract = {One recent advance in software technology is the
development of software agents that can adapt to changes in
their environment and can cooperate and coordinate their
activities to complete a given task. Such agents can be
distributed over a network.
Advances in hardware technology have meant that clusters of
workstations can be used to create parallel virtual
machines that bring the power of parallel computing to a
much wider research and development community. Many
software packages are now being developed to utilise such
cluster environments.
In a cluster, each processor will be multitasking and
running other jobs simultaneously with a distributed
application that uses a message passing environment such as
MPI. A typical application might be a large scale
mesh-based computation, such as a finite element code, in
which load balancing is equivalent to mesh partitioning.
When the load is varying between processors within the
cluster, distributing the computation in equal amounts may
not deliver the optimum performance. Some machines may be
very heavily loaded by other users while other processors
may have no such additional load. It may be beneficial to
measure current system information and use this information
when balancing the load within a single distributed
application program.
This thesis presents one approach to distributing workload
more efficiently in a multi-user distributed environment by
using mobile agents to collect system information which is
then transmitted to all the MPI tasks. The thesis contains
a review of software agents and mesh partitioning together
with some numerical experiments and a paper.
}
}
@PhDThesis{ itlic:2002-005,
author = {Kaushik Mahata},
title = {Identification of Dynamic Errors-in-Variables Models},
school = it,
department = syscon,
year = 2002,
number = {2002-005},
type = {Licentiate thesis},
month = may,
abstract = {The problem of identifying dynamic errors-in-variables
models is of fundamental interest in many areas like
process control, array signal processing, astronomical data
reduction. In recent years, this field has received
increased attention of the research community. In this
thesis, some time domain and frequency domain approaches
for identifying the errors-in-variables model are studied.
The first chapter gives an overview of various methods for
identifying dynamic errors-in-variables systems. Several
approaches are classified and a qualitative comparison of
different existing methods is also presented. The second
chapter deals with instrumental variables based approaches.
The least squares and the total least squares methods of
solving the Yule-Walker equation are of central interest
here. The methods are compared from the view point of
asymptotic performance, numerical robustness and
computation. The method presented in the third chapter uses
prefiltered data. The input-output data is passed through a
pair of user defined prefilters and the output data from
the prefilters is subjected to a least-squares like
algorithm. Compared to the IV approach, the proposed method
shows a significant improvement in the small-sample
properties of the MA parameter estimates, without any
increase in the computational load. In the fourth chapter,
we show that the two-dimensional process composed of the
input-output data admits a finite order ARMA
representation. Then we propose a parametric identification
algorithm and another non-parametric identification method
based on the ARMA representation. }
}
@PhDThesis{ itlic:2002-004,
author = {Martin Nilsson},
title = {Iterative Solution of Maxwell's Equations in Frequency
Domain},
school = it,
department = tdb,
year = 2002,
number = {2002-004},
type = {Licentiate thesis},
month = jun,
abstract = {We have developed an iterative solver for the Moment
Method. It computes a matrix vector product with the
multilevel Fast Multipole Method, which makes the method
scale with the number of unknowns. The iterative solver is
of Block Quasi-Minimum Residual type and can handle several
right hand sides at once. The linear system is
preconditioned with a Sparse Approximate Inverse, which is
modified to handle dense matrices. The solver is
parallelized on shared memory machines using OpenMP.
To verify the method some tests are conducted on varying
geometries. We use simple geometries to show that the
method works. We show that the method scales on several
processors of a shared memory machine. To prove that the
method works for real life problems, we do some tests on
large scale aircrafts. The largest test is a one million
unknown simulation on a full scale model of a fighter
aircraft. }
}
@PhDThesis{ itlic:2002-003,
author = {Emad Abd-Elrady},
title = {Harmonic Signal Modeling Based on the Wiener Model
Structure},
school = it,
department = syscon,
year = 2002,
number = {2002-003},
type = {Licentiate thesis},
month = may,
url = {http://www.it.uu.se/research/publications/lic/2002-003/files/}
,
abstract = {The estimation of frequencies and corresponding harmonic
overtones is a problem of great importance in many
situations. Applications can, for example, be found in
supervision of electrical power transmission lines, in
seismology and in acoustics. Generally, a periodic function
with an unknown fundamental frequency in cascade with a
parameterized and unknown nonlinear function can be used as
a signal model for an arbitrary periodic signal. The main
objective of the proposed modeling technique is to estimate
the fundamental frequency of the periodic function in
addition to the parameters of the nonlinear function. The
thesis is divided into four parts. In the first part, a
general introduction to the harmonic signal modeling
problem and different approaches to solve the problem are
given. Also, an outline of the thesis and future research
topics are introduced. In the second part, a previously
suggested recursive prediction error method (RPEM) for
harmonic signal modeling is studied by numerical examples
to explore the ability of the algorithm to converge to the
true parameter vector. Also, the algorithm is modified to
increase its ability to track the fundamental frequency
variations. A modified algorithm is introduced in the third
part to give the algorithm of the second part a more stable
performance. The modifications in the RPEM are obtained by
introducing an interval in the nonlinear block with fixed
static gain. The modifications that result in the
convergence analysis are, however, substantial and allow a
complete treatment of the local convergence properties of
the algorithm. Moreover, the Cram{\'e}r-Rao bound (CRB) is
derived for the modified algorithm and numerical
simulations indicate that the method gives good results
especially for moderate signal to noise ratios (SNR). In
the fourth part, the idea is to give the algorithm of the
third part the ability to estimate the driving frequency
and the parameters of the nonlinear output function
parameterized also in a number of adaptively estimated grid
points. Allowing the algorithm to automatically adapt the
grid points as well as the parameters of the nonlinear
block, reduces the modeling errors and gives the algorithm
more freedom to choose the suitable grid points. Numerical
simulations indicate that the algorithm converges to the
true parameter vector and gives better performance than the
fixed grid point technique. Also, the CRB is derived for
the adaptive grid point technique.}
}
@PhDThesis{ itlic:2002-002,
author = {Anders Berglund},
title = {On the Understanding of Computer Network Protocols},
school = it,
department = docs,
year = 2002,
number = {2002-002},
type = {Licentiate thesis},
month = mar,
url = {http://www.docs.uu.se/~andersb/lic/},
oldnote = {Included papers available at
\url{http://www.it.uu.se/research/publications/lic/2002-002/paperA-2002-002.pdf}
and
\url{http://www.it.uu.se/research/publications/lic/2002-002/paperB-2002-002.pdf}}
,
abstract = {How students learn about network protocols is studied in a
project-centred, internationally distributed, university
course in computer systems taught jointly by two
universities. Insights into students' understanding of
basic concepts within computer networks are gained through
an empirical phenomenographic research approach.
The use of phenomenography as a research approach makes it
possible to learn about computer science, as it is
experienced by the students. The context in which the
research is carried out and issues concerning by whom the
context is experienced, are investigated and form a part of
the methodological basis.
Students' understanding of some protocols that are used
within the project, as well as their experience of the
general concept of network protocols are investigated, and
different ways of experiencing the protocols are discerned.
Some aspects that indicate good learning outcomes are
identified, such as being capable of understanding a
protocol in different ways and of making relevant choices
between the ways it could be experienced according to the
context in which it appears.
Based on these results a discussion on learning and
teaching is developed. It is argued that a variation in the
context in which the protocol is experienced promotes good
learning, since different ways of experiencing a protocol
are useful with different tasks to hand. A student with a
good understanding of network protocols can choose in a
situationally relevant way between different ways of
experiencing a protocol. }
}
@PhDThesis{ itlic:2002-001,
author = {Eva Mossberg},
title = {Higher Order Finite Difference Methods for Wave
Propagation Problems},
school = it,
department = tdb,
year = 2002,
month = feb,
number = {2002-001},
type = {Licentiate thesis},
abstract = {Wave propagation is described by the wave equation, or in
the time-periodic case, by the Helmholtz equation. For
problems with small wavelengths, high order discretizations
must be used to resolve the solution. Two different
techniques for finding compact finite difference schemes of
high order are studied and compared. The first approach is
Numerov's idea of using the equation to transfer higher
derivatives to lower order ones for the Helmholtz equation,
or, for the wave equation, from time to space. The second
principle is the method of deferred correction, where a
lower order approximation is used for error correction.
For the time-independent Helmholtz problem, sharp estimates
for the error is derived, in order to compare the
arithmetic complexity for both approaches with a
non-compact scheme. The characteristics of the errors for
fourth order as well as sixth order accuracy are
demonstrated and the advantages and disadvantages of the
methods are discussed.
A time compact, Numerov-type, fourth order method and a
fourth order method using deferred correction in time are
studied for the wave equation. Schemes are derived for both
the second order formulation of the equation, and for the
system in first order form. Stability properties are
analyzed and numerical experiments have been performed, for
both constant and variable coefficients in the equations.
For the first order formulation, a staggered grid is used.
}
}
@PhDThesis{ itlic:2001-016,
author = {Furun{\"a}s {\AA}kesson, Johan},
title = {Interprocess Communication Utilising Special Purpose
Hardware},
school = it,
department = docs,
year = 2001,
number = {2001-016},
type = {Licentiate thesis},
month = dec,
abstract = {Real-Time Systems are computer systems with constraints on
the timing of actions. To ease the development and
maintenance of application software, Real-time Systems
often make use of a Real-Time Operating System (RTOS). Its
main task is scheduling of application processes (tasks).
Other functions can be inter process communication,
interrupt handling, memory management etc.
Sometimes it is hard (or even impossible) to meet the time
constraints specified for a real-time system, resulting in
an incorrectly functioning application. A possible remedy
is to redesign the system by upgrading the processor and/or
removing functionality, etc. An alternative solution could be
the use of a special purpose hardware accelerated RTOS. The
aim of such an accelerator is to speedup RTOS functions
that impose big overhead i.e. to reduce the kernel overhead
by offloading the application processor. Accordingly, the
processor gets more time for executing application
software, and hopefully the time constraints can be met.
The main drawback is the cost of extra hardware.
This thesis presents results from implementing RTOS
functions in hardware, especially inter process
communication (IPC) functions. The types of systems
considered are uniprocessor and shared memory
multiprocessor real-time systems.
IPC is used in systems with co-operating processes. The
operating systems on the market support a large variation
of IPC mechanisms. We will here present and evaluate three
different IPC implementations. The first is an extended
message queue mechanism that is used in commercial robot
control applications. The second is the signal mechanism in
OSE, a commercial RTOS predominantly used in
telecommunication control applications, and the third is
the semaphore and message queue mechanisms supported by the
leading commercial RTOS VxWorks. All the implementations
are based on a pre-emptive priority-based hardware
real-time kernel accelerator.
We show that it is not optimal, practical or desirable to
implement every kernel function into hardware, regarding
systems in the scope of this thesis. However, an
accelerator allows new functionality to be implemented. We
illustrate this by implementing a message queue mechanism
that supports priority inheritance for message arrival in
hardware, which is too expensive to implement in software.
Also, we show that substantial speedups are possible, and
that a crucial mechanism in achieving speedup is the
accelerator-processor interconnect. We further note that
application speedups are possible, even in cases with an
IPC-mechanism slow-down. The main reasons for this are that
the accelerator can off-load the processor by handling the
RTOS timing mechanism (clock-ticks), reducing the RTOS code
to be executed on the processor, and handling interrupts.
}
}
@PhDThesis{ itlic:2001-015,
author = {Stefan S{\"o}derberg},
title = {A Parallel Block-Based {PDE} Solver with Space-Time
Adaptivity},
school = it,
department = tdb,
year = 2001,
month = dec,
number = {2001-015},
type = {Licentiate thesis},
abstract = {A second order space and time adaptive method for parallel
solution of hyperbolic PDEs on structured grids is
presented. The grid is adapted to the underlying solution
by successive refinement in blocks. Therefore, there may be
jumps in the cell size at the block faces. Special
attention is needed at the block boundaries to maintain
second order accuracy and stability. The stability of the
method is proven for a model problem.
The step sizes in space and time are selected based on
estimates of the local truncation errors and an error
tolerance provided by the user. The global error in the
solution is also computed by solving an error equation
similar to the original problem on a coarser grid.
The performance of the method depends on the number of
blocks used in the domain. A method of predicting the
optimal number of blocks is presented. The cells are
distributed in blocks over the processor set using a number
of different partitioning schemes.
The method is used to successfully solve a number of test
problems where the method selects the appropriate space and
time steps according to the error tolerance. }
}
@PhDThesis{ itlic:2001-014,
author = {Abraham Zemui},
title = {Fourth Order Symmetric Finite Difference Schemes for the
Wave Equation},
school = it,
department = tdb,
year = 2001,
number = {2001-014},
type = {Licentiate thesis},
month = dec,
abstract = {The solution of the acoustic wave equation in one space
dimension is studied. The PDE is discretized using finite
element approximation. A cubic piecewise Lagrange
polynomial is used as basis. Consistent finite element and
lumped mass schemes are obtained. These schemes are
interpreted as finite difference schemes. Error analysis is
given for these finite differences (only for constant
coefficients).}
}
@PhDThesis{ itlic:2001-013,
author = {Alexandre David},
title = {Practical Verification of Real-time Systems},
type = {Licentiate thesis},
school = it,
department = docs,
year = 2001,
number = {2001-013},
month = oct,
abstract = {Formal methods are becoming mature enough to be used on
non trivial examples. They are particularly well fitted for
real-time systems whose correctness is defined in terms of
correct responses at correct times. Most common real-time
systems are of reasonable size and can therefore be handled
by an automatic verification tool such as \textsc{Uppaal}.
Unfortunately the application of such techniques is not
widely spread.
This thesis presents advances in making formal techniques
more accessible technology for system development and
analysis. As the first contribution, we report on an
industrial case study to show that model checkers can be
used for debugging and error localization. We shall present
a number of abstraction techniques applied in the case
study to avoid the state explosion problem. As the second
contribution, we have developed a hierarchical extension of
timed automata to enable more structured, compact, and more
complex descriptions of systems by the users. Such a
hierarchical representation is better suited for
abstraction and expected to give better searching
algorithms. Finally we present a hybrid animation system
serving as a plug-in module for model-checkers to improve
features for modelling and simulation. }
}
@PhDThesis{ itlic:2001-012,
author = {Per {\AA}hgren},
title = {Teleconferencing, System Identification and Array
Processing},
school = it,
department = syscon,
year = 2001,
number = {2001-012},
type = {Licentiate thesis},
month = oct,
abstract = {The area of teleconferencing has long yielded great
interest in the signal processing community. The main
reasons for this are probably the huge interest from the
industry and the challenging problems of the topic. The
problems of teleconferencing are relevant for several
different disciplines in signal processing. Three of these
are Acoustic Echo Cancellation, System Identification and
Sensor Array Signal Processing. In this thesis some
problems related to these disciplines are studied. The
thesis is divided into 6 parts, one for each paper included.
In the first part a new adaptive algorithm is applied to
the acoustic echo cancellation problem. It is shown to
perform much better than the Normalized Least Mean Squares
(NLMS) algorithm and while it performs worse than the
standard Recursive Least Squares (RLS) algorithm it is
shown to be computationally simpler than this.
In the second part the hierarchical RLS algorithm is
analyzed. The extraordinary results presented for this
algorithm in previous papers are discussed and explained.
In the third part a new initialization method for RLS is
presented that yields the exact Least Squares estimates
while not being computationally more demanding than RLS.
This is an important contribution since the standard
initialization of the RLS algorithm is somewhat arbitrary.
In the fourth part a method is presented that deals with
the problem of estimating the common factors out of an
arbitrary number of polynomials. Two problems of array
processing and system identification are stated as problems
for common factor estimation and the presented method is
applied to these. For these two problems the method is
shown to perform better than existing methods.
In the fifth part a method for beamforming using few
sensors is presented. Data-dependent beamformers usually
perform badly when there are few sensors in the array,
particularly when the beamformer constraints are numerous.
The method presented deals with this problem by
approximately fulfilling the beamformer constraints and
hence getting extra degrees of freedom for suppressing
interferences.
In the sixth part the previously unsolved problem of array
processing of non-zero mean signals is solved for the
colored noise case. Methods are presented both for the
estimation problem and the detection problem and are shown
to perform well in numerical examples. }
}
@PhDThesis{ itlic:2001-011,
author = {P{\"a}r Samuelsson},
title = {Modelling and control of activated sludge processes with
nitrogen removal},
school = it,
department = syscon,
year = 2001,
number = {2001-011},
type = {Licentiate thesis},
month = aug,
abstract = {Stricter legislations on nitrogen removal together with
increasing wastewater loads have successively increased the
need for optimization and control of activated sludge
processes. The aim of this thesis is primarily to propose
and illustrate methods for modelling and control of
activated sludge processes, and thus possibly improve
process efficiency. The first part of this thesis describes
a JAVA based simulator of the activated sludge process. Its
features are discussed, and some control strategies are
illustrated. The second part of the thesis presents a
simple model based feedforward controller for controlling
the nitrate level by using an external carbon source.
Several simulation examples are used to illustrate the
control law. In the third part, a model based strategy for
control of the ammonium level by manipulating the aeration
volume in the activated sludge process is presented. The
strategy is based on exact linearization combined with some
logical programming rules in order to deal with discrete
volume changes. The control law is also evaluated in
simulations.}
}
@PhDThesis{ itlic:2001-010,
author = {Johan Edlund},
title = {A Parallel, Iterative Method of Moments and Physical
Optics Hybrid Solver for Arbitrary Surfaces},
school = it,
department = tdb,
year = 2001,
number = {2001-010},
type = {Licentiate thesis},
month = aug,
abstract = {We have developed an MM-PO hybrid solver designed to
deliver reasonable accuracy inexpensively in terms of both
CPU-time and memory demands. The solver is based on an
iterative block Gauss-Seidel process to avoid unnecessary
storage and matrix computations, and can be used to solve
the radiation and scattering problems for both disjunct and
connected regions. It supports thin wires and dielectrics
in the MM domain and has been implemented both as a serial
and parallel solver.
Numerical experiments have been performed on simple objects
to demonstrate certain key features of the solver, and
validate the positive and negative aspects of the MM/PO
hybrid. Experiments have also been conducted on more
complex objects such as a model aircraft, to demonstrate
that the good results from the simpler objects are
transferable to the real life situation. The complex
geometries have been used to conduct tests to investigate
how well parallelised the code is, and the results are
satisfactory. }
}
@PhDThesis{ itlic:2001-009,
author = {Johan Bengtsson},
title = {Efficient Symbolic State Exploration of Timed Systems:
Theory and Implementation},
school = it,
department = docs,
year = 2001,
number = {2001-009},
type = {Licentiate thesis},
month = may,
abstract = {Timing aspects are important for the correctness of
safety-critical systems. It is crucial that these aspects
are carefully analysed in designing such systems. UPPAAL is
a tool designed to automate the analysis process. In
UPPAAL, a system under construction is described as a
network of timed automata and the desired properties of the
system can be specified using a query language. Then UPPAAL
can be used to explore the state space of the system
description to search for states violating (or satisfying)
the properties. If such states are found, the tool provides
diagnostic information, in form of executions leading to
the states, to help the designers, for example, to locate
bugs in the design.
The major problem for UPPAAL and other tools for timed
systems to deal with industrial-size applications is the
state space explosion. This thesis studies the sources of
the problem and develops techniques for real-time model
checkers, such as UPPAAL, to attack the problem. As
contributions, we have developed the notion of committed
locations to model atomicity and local-time semantics for
timed systems to allow partial order reductions, and a
number of implementation techniques to reduce time and
space consumption in state space exploration. The
techniques are studied and compared by case studies. Our
experiments demonstrate significant improvements on the
performance of UPPAAL. }
}
@PhDThesis{ itlic:2001-008,
author = {Markus Bylund},
title = {Personal Service Environments --- Openness and User
Control in User-Service Interaction},
school = it,
department = csd,
year = 2001,
number = {2001-008},
type = {Licentiate thesis},
month = jun,
abstract = {This thesis describes my work with making the whole
experience of using electronic services more pleasant and
practical. More and more people use electronic services in
their daily life - be it services for communicating with
colleagues or family members, web-based bookstores, or
network-based games for entertainment. However, electronic
services in general are more difficult to use than they
would have to be. They are limited in how and when users
can access them. Services do not collaborate despite
obvious advantages to their users, and they put the
integrity and privacy of their users at risk.
In this thesis, I argue that there are structural reasons
for these problems rather than problems with content or the
technology per se. The focus when designing electronic
services tends to be on the service providers or on the
artifacts that are used for accessing the services. I
present an approach that focuses on the user instead, which
is based on the concept of personal service environments.
These provide a mobile locale for storing and running
electronic services of individual users. This gives the
user increased control over which services to use, from
where they can be accessed, and what personal information
that services gather. The concept allows, and encourages,
service collaboration, but not without letting the user
maintain the control over the process. Finally, personal
service environments allow continuous usage of services
while switching between interaction devices and moving
between places.
The sView system, which is also described, implements
personal service environments and serves as an example of
how the concept can be realized. The system consists of two
parts. The first part is a specification of how both
services for sView and infrastructure for handling services
should be developed. The second part is a reference
implementation of the specification, which includes sample
services that add to and demonstrate the functionality of
sView.}
}
@PhDThesis{ itlic:2001-007,
author = {Hans Norlander},
title = {Parameterization of State Feedback Gains for Pole
Assignment},
school = it,
department = syscon,
year = 2001,
number = {2001-007},
type = {Licentiate thesis},
month = may,
abstract = {The pole assignment problem has been subject for research
for a long time. For single-input single-output systems
this problem is well understood but for multi-input
multi-output systems the pole assignment problem is more
complex.
In this thesis a parameterization of state feedback gains
for pole assignment is characterized with respect to
completeness, redundancy and existence. In order to make a
systematic examination of this parameterization a number of
classes are introduced.
This parameterization depends on two matrices that can be
regarded as design parameters. In the thesis it is shown
how the degree of freedom in the pole assignment problem
for multi-input systems is characterized by these two
matrices.
It turns out that the properties of the parameterization
depend on whether the characteristic polynomials of the
open and the closed loop systems are coprime or not. It is
shown in the thesis that if the characteristic polynomials
are coprime, every possible feedback gain can be
parameterized in this way, and in this sense the
parameterization is complete. If the characteristic
polynomials have factors in common the parameterization is
not complete. In the thesis the shortcomings of the
parameterization for this case are characterized.
The design parameters seem to offer a greater degree of
freedom than what can be offered in the pole assignment
problem. This indicates a certain degree of
overparameterization. This redundancy in the design
parameters is characterized in the thesis.
The parameterization implies that a certain matrix is
invertible. Necessary conditions for when this matrix is
invertible are given in terms of the two design parameters.
It is shown that this matrix is invertible for almost every
value of the design parameters when the characteristic
polynomials are coprime, and hence that the parameterized
gains are generally applicable.
The parameterization and its properties are illustrated on
a linear model of the military aircraft JAS Gripen. }
}
@PhDThesis{ itlic:2001-006,
author = {Bengt G{\"o}ransson},
title = {Usability Design: A Framework for Designing Usable
Interactive Systems in Practice},
school = it,
department = hci,
year = 2001,
number = {2001-006},
type = {Licentiate thesis},
month = apr,
abstract = {Today many companies have started to become aware of the
advantages of doing user-centred design. However, it is
extremely rare that companies adopt a fully integrated
user-centred design approach in one strategic shift.
Rather, companies tend to adopt practices and methods in
stages or adopt a particular method or practice only when a
complex set of factors align to create readiness. There is
a big market for companies vending usability methods, but
poor usability of systems and products is still very
common, the vendors often blaming it on factors outside
their immediate influence. This among other things is a
call for us to work for a user-centred design attitude as a
major strategy for the system development process.
The content of this thesis is dedicated to the question of
how to develop usable interactive systems in practice. Main
focus is on how we can raise the awareness of usability;
articulate the need for user-centred design within the
industry and development organisations; and practice
user-centred design. A framework for usability design as an
unpretentious way of applying user-centred design is
described and discussed.}
}
@PhDThesis{ itlic:2001-005,
author = {Per Carlsson},
title = {Market and Resource Allocation Algorithms with Application
to Energy Control},
school = it,
department = csd,
year = 2001,
number = {2001-005},
type = {Licentiate thesis},
month = apr,
abstract = {The energy markets of today are markets with rather few
active participants. The participants are, with few
exceptions, large producers and distributors. The market
mechanisms that are used are constructed with this kind of
a market situation in mind. With an automatic or
semiautomatic approach, the market mechanism would be able
to incorporate a larger number of participants. Smaller
producers, and even consumers, could take an active part in
the market. The gain is in more efficient markets, and ---
due to smaller fluctuations in demand --- better resource
usage from an environmental perspective.
The energy markets of the Nordic countries (as well as many
others) were deregulated during the last few years. The
change has been radical and the situation is still rather
new. We believe that the market can be made more efficient
with the help of the dynamics of the small actors.
The idealised world of theory (of economics) often relies
on assumptions such as continuous demand and supply curves.
These assumptions are useful, and they do not introduce
problems in the power market situation of today, with
relatively few, large, participants. When consumers and
small producers are introduced on the market, the situation
is different. Then it is a drawback if the market mechanisms
cannot handle discontinuous supply and demand.
The growth in accessibility to computational power and data
communications that we have experienced in the last years
(and are experiencing) could be utilised when constructing
mechanisms for the energy markets of tomorrow.
In this thesis we suggest a new market mechanism,
\textsc{ConFAst}, that utilises the technological progress
to make it possible to incorporate a large number of active
participants on the market. The mechanism does not rely on
the assumptions above. The gain is a more efficient market
with less fluctuations in demand over the day.
To make this possible there is a need for efficient
algorithms, in particular this mechanism relies on an
efficient aggregation algorithm. An algorithm for
aggregation of objective functions is part of this thesis.
The algorithm handles maximisation with non concave, even
noisy, objective functions. Experimental results show that
the approach, in practically relevant cases, is
significantly faster than the standard algorithm. }
}
@PhDThesis{ itlic:2001-004,
author = {Bengt Eliasson},
title = {Numerical Simulation of Kinetic Effects in Ionospheric
Plasma},
school = it,
department = tdb,
year = 2001,
number = {2001-004},
type = {Licentiate thesis},
month = apr,
abstract = {In this thesis, we study numerically the one-dimensional
Vlasov equation for a plasma consisting of electrons and
infinitely heavy ions. This partial differential equation
describes the evolution of the distribution function of
particles in the two-dimensional phase space $(x,v)$. The
Vlasov equation describes, in statistical mechanics terms,
the collective dynamics of particles interacting with
long-range forces, but neglects the short-range
``collisional'' forces. A space plasma consists of
electrically charged particles, and therefore the most
important long-range forces acting on a plasma are the
Lorentz forces created by electromagnetic fields. What
makes the numerical solution of the Vlasov equation a
challenging task is firstly that the fully
three-dimensional problem leads to a partial differential
equation in the six-dimensional phase space, plus time,
making it even hard to store a discretized solution in the
computer's memory. Secondly, the Vlasov equation has a
tendency of structuring in velocity space (due to free
streaming terms), in which steep gradients are created and
problems of calculating the $v$ (velocity) derivative of
the function accurately increase with time. The method used
in this thesis is based on the technique of Fourier
transforming the Vlasov equation in velocity space and then
solving the resulting equation. We have developed a method
where the small-scale information in velocity space is
removed through an outgoing wave boundary condition in the
Fourier transformed velocity space. The position of the
boundary in the Fourier transformed variable determines the
amount of small-scale information saved in velocity space.
The numerical method is used to investigate a phenomenon of
tunnelling of information through an ionospheric layer,
discovered in experiments, and to assess the accuracy of
approximate analytic formulae describing plasma wave
dispersion. The numerical results are compared with
theoretical predictions, and further physical experiments
are proposed. }
}
@PhDThesis{ itlic:2001-003,
author = {Erik K. Larsson},
title = {On Identification of Continuous-Time Systems and Irregular
Sampling},
school = it,
department = syscon,
year = 2001,
number = {2001-003},
type = {Licentiate thesis},
month = mar,
abstract = {The problem of identifying continuous-time systems is of
fundamental interest in various areas, such as,
astrophysics, economics, control and signal processing.
Over the years there has been an extensive research going
on in this field, which has resulted in numerous
publications. The most obvious reason for working with
continuous-time systems is that most physical systems are
inherently continuous in time. Therefore, the parameters in
the models often have a physical interpretation.
In this thesis some specific problems concerning
identification of continuous-time autoregressive (CAR)
processes are studied. The main approach is based on
replacing the differentiation operator with some
approximations and forming a discrete-time linear
regression model. The continuous-time system parameters are
then obtained by using the least squares method. It is,
however, well known that this approach will result in
biased estimates unless some modifications are introduced.
The first part of the thesis explores the possibility to
extend some of the existing methods for identifying
CAR-processes, using the approach described above, to the
case of unevenly sampled data. Some computationally very
efficient methods are presented.
In the second part of the thesis a simple method for
computing the CRB for CAR-processes, given arbitrary
sampling patterns, is introduced. Several simulation
studies are considered with some interesting results.
In the third part of the thesis the problem of identifying
CAR-processes, using limiting properties of sampled
stochastic systems, is addressed. The presented method is
intuitively clear and numerically sound, it is based on
some new results regarding sampling of CAR-processes that
are presented as well. }
}
@PhDThesis{ itlic:2001-002,
author = {Johan Steensland},
title = {Domain-based partitioning for parallel {SAMR}
applications},
school = it,
department = tdb,
year = 2001,
number = {2001-002},
type = {Licentiate thesis},
month = mar,
abstract = {This thesis presents a study of domain-based partitioning
techniques for dynamic structured grid hierarchies,
occurring in structured adaptive mesh refinement (SAMR)
methods. Such methods for obtaining the numerical solution
to partial differential equations yield highly advantageous
ratios for cost/accuracy as compared to methods based upon
static uniform approximations. Distributed implementations
of these techniques have the potential for enabling
realistic simulations of complex systems. These
implementations however, present significant challenges in
dynamic data-distribution and load balancing. That is, when
SAMR is executed on a parallel computer, the work load will
change dynamically. Thus, there is need for
\textit{dynamic} load balancing in order to efficiently
utilize the resources of the parallel computer. Inverse
space-filling curve partitioning (ISP) is appealing for
load balancing in parallel SAMR, because of its speed. In
this work, ISP is considered as \textit{part} of a
partitioning approach, which combines structured and
unstructured techniques.
Various design decisions for the structured partitioning
are considered. Different design choices lead to graphs
with different properties. One objective is to investigate
how these differences affect the behavior of ISP. This
thesis contributes by identifying certain design choices as
being advantageous, and by presenting four new partitioning
algorithms that correspond to these design decisions.
An experimental comparison of dynamic partitioning
techniques for \textit{blockwise} parallel structured
adaptive mesh refinement applications is presented. It is
shown that an ISP-based technique can compete with other
approaches for certain classes of applications. Moreover,
an extended experimental characterization of dynamic
partitioning/load-balancing techniques for
\textit{general} adaptive grid hierarchies is presented.
Techniques studied include newly proposed as well as
existing approaches. It was found that two of the proposed
algorithms were particularly useful: pBD-ISP for the more
communication dominated applications, and G-MISP+SP for the
computation dominated applications.
Policies for the automatic selection of partitioner based
on application and system state are outlined.
Recommendations for appropriate partitioning techniques,
given application and system state, are given. The overall
motivation is the formulation of policies required to drive
a \textit{dynamically adaptive} meta-partitioner for SAMR
grid hierarchies capable of selecting the most appropriate
partitioning strategy at runtime, based on current
application and system state. Such a partitioner may
significantly decrease application execution time.}
}
@PhDThesis{ itlic:2001-001,
author = {Erik Bor{\"a}lv},
title = {Design and Usability in Telemedicine},
school = it,
department = hci,
year = 2001,
number = {2001-001},
type = {Licentiate thesis},
month = feb,
abstract = {A design of computer systems, that effectively supports
the user, is a major goal within human-computer
interaction. To achieve this, we must understand and master
several tasks. These tasks concern firstly what to develop
and secondly how to develop the system.
The design and implementation of effective and efficient
user interfaces is a prerequisite for the successful
introduction of computer support in the medical domain. We
base our work on a fundamental understanding of cognitive
aspects of human-computer interaction, as well as on
detailed analysis of the specific needs and requirements of
the end users, i.e. the medical professionals.
This thesis presents several approaches for development of
systems for computer-supported work in health care. The
solutions described concern vital problem areas: (1) the
focus on the work tasks to be performed, (2) the cost of
software and the way competition works in a networked
world. Solutions to these problems can lead to more usable
systems from a user's perspective but may also change the
nature of computer applications. }
}
@PhDThesis{ itlic:2000-011,
author = {Bharath Bhikkaji},
title = {Model Reduction for Diffusion Systems},
school = it,
department = syscon,
year = 2000,
number = {2000-011},
type = {Licentiate thesis},
month = dec,
abstract = {Diffusion phenomena have been studied with a lot of
interest, for a long time, due to its historical and
practical significance. In the recent days it has thrown a
lot of interest among control engineers, as more and more
practical systems, varying from stock markets to
environmental pollution, have been observed to involve
diffusion.
Diffusion systems are normally modeled by linear partial
differential equations (LPDE's) of the form \[
\frac{\partial T(x,t)}{\partial t} = \mathcal{L}T(x,t)
\label{eq1.0} \] where $ \mathcal{L}$ is a second order
linear spatial differential operator and $ T(x,t) $ is the
physical quantity, whose variations in the spatial domain
cause diffusion. To characterise diffusion phenomena, one
has to obtain the solution of (\ref{eq1.0}) either
analytically or numerically. Note that, since (\ref{eq1.0})
involves a second order spatial operator and a first order
time derivative, one needs at least two boundary conditions
in the spatial domain, $x$, and an initial condition at
time $t=0$, for determining $ T(x,t) $.
LPDE's of the type (\ref{eq1.0}) can be interpreted as
infinite order linear time invariant systems (LTI systems)
with inputs as boundary conditions. To compute the solution
of (\ref{eq1.0}) numerically, one has to approximate,
explicitly or implicitly, the underlying infinite order
system by a finite order system. Any numerical scheme,
which computes the solution of (\ref{eq1.0}), essentially
approximates the underlying infinite order LTI system by a
finite order LTI system. The efficiency of the
approximation, for a given problem, varies for the
different numerical schemes.
In this thesis, we make an attempt to explore more about
diffusion systems in general. As a starting point, we
consider a simple case of one dimensional heat diffusion
across a homogeneous region. The resulting LPDE is first
shown explicitly to be an infinite order dynamical system.
An approximate solution is computed from a finite order
approximation of the true infinite order dynamical system.
In this thesis, we first construct the finite order
approximations using certain standard PDE solvers based on
Chebyshev polynomials. From these finite order
approximations we choose the best one, from a model
reduction perspective, and use it as a benchmark model. We
later construct two more approximate models, by exploiting
the given structure of the problem and we show by
simulations that these models perform better than the
chosen benchmark. }
}
@PhDThesis{ itlic:2000-010,
author = {Markus Lindgren},
title = {Measurement and Simulation Based Techniques for Real-Time
Analysis},
school = it,
department = docs,
year = 2000,
type = {Licentiate thesis},
number = {2000-010},
month = dec,
note = {Also published as report MRTC 00/25 at M{\"a}lardalens
h{\"o}gskola},
abstract = {Rigorous methods for design and implementation of safety
critical real-time systems are vital to avoid loss of human
lives and/or severe economic losses. Unfortunately, many of
these systems are designed and evaluated using ad-hoc
techniques. There are, on the other hand, relatively well
developed scientific theories for modeling and analysis of
timing and reliability. These theories are, however, only
very slowly being introduced in industrial development.
Important reasons for this are the simplifying model
assumptions and lack of appropriate tools for timing
analysis.
This thesis presents two new methods aimed to narrow the
gap between scientific results and industrial practice in
evaluation and design of real-time systems.
The first contribution is a method that from execution time
measurements on the target system can derive worst-case
execution time estimates of programs. Such estimates are
essential when verifying if a system fulfills its timing
requirements. The second contribution is a simulation based
technique that can be used to evaluate timing aspects of
distributed real-time systems, as well as calculating
reliability estimates of these systems. Such estimates are
essential in determining if a system meets its requirements
sufficiently well. Compared to proposed analytical methods
for execution time analysis and schedulability analysis,
the starting point for both these methods are real target
systems, rather than an abstract model with limited
correspondence to reality.
The presented initial case-studies give clear evidence that
the proposed methods have potential of being both
applicable and useful. }
}
@PhDThesis{ itlic:2000-009,
author = {Jan Nystr{\"o}m},
title = {A formalisation of the {ITU-T} {I}ntelligent {N}etwork
standard},
school = it,
department = docs,
year = 2000,
type = {Licentiate thesis},
number = {2000-009},
month = dec,
abstract = {Telecommunication systems are today among the largest and
most heterogeneous computer systems that exist. The
functionality offered by them is rapidly increasing, by
numerous features: call waiting, credit-card billing and
call-forwarding to name a few.
The realisation of extra services poses a challenge to
implementors, in particular since different services are
developed at different times and places, by different
people. Not only is the number and complexity of services
increasing but some vendors want to enable users to tailor
their own services and ultimately design them entirely.
This of course calls for rigorous control of the services
so that they do not come into conflict with the interest of
the vendors, other users or surprise the users with their
behaviours.
One way of aiding the service designers is to provide a
service creation environment containing a set of well
defined building blocks that would be uniform for all
features, irrespective of vendor or service. Such an
environment also needs tool support for writing, checking,
validating and analysing for possible conflicts.
We have constructed a formalism for compactly specifying
the interface behaviour of the switching and service logic
system underlying a service creation environment, and for
specifying the behaviour of components of an environment
for service creation. For this formalism we supply tool
support in the form of a simulator.
We have further made a formalisation, in our framework, of
the ITU-T Intelligent Network model, Capability Set-1. The
formalisation concerns both the underlying service
architecture, in which service logic is performed by a
dedicated Service Control Function, and the component
language, in which Service Independent Building Blocks are
composed to yield new services. }
}
@PhDThesis{ itlic:2000-008,
author = {Marcus Nilsson},
title = {Regular Model Checking},
school = it,
department = docs,
year = 2000,
type = {Licentiate thesis},
number = {2000-008},
month = dec,
abstract = {We present \emph{regular model checking}, a framework for
algorithmic verification of infinite-state systems with,
e.g., queues, stacks, integers, or a parameterized linear
topology. States are represented by strings over a finite
alphabet and the transition relation by a regular
length-preserving relation on strings. Both sets of states
and the transition relation are represented by regular
sets. Major problems in the verification of parameterized
and infinite-state systems are to compute the set of states
that are reachable from some set of initial states, and to
compute the transitive closure of the transition relation.
We present an automata-theoretic construction for computing
a non-finite composition of regular relations, e.g., the
transitive closure of a relation. The method is incomplete
in general, but we give sufficient conditions under which
it works. We show how to reduce model checking of
$\omega$-regular properties of parameterized systems into a
non-finite composition of regular relations. We also report
on an implementation of regular model checking, based on a
new package for non-deterministic finite automata. }
}
@PhDThesis{ itlic:2000-007,
author = {Magnus Larsson},
title = {Applying Configuration Management Techniques to
Component-Based Systems},
school = it,
department = docs,
year = 2000,
number = {2000-007},
type = {Licentiate thesis},
month = dec,
note = {Also published as report MRTC 00/24 at M{\"a}lardalens
h{\"o}gskola},
abstract = {Building software from components, rather than writing the
code from scratch has several advantages, including reduced
time to market and more efficient resource usage. However,
component based development without consideration of all
the risks and limitations involved may give unpredictable
results, such as the failure of a system when a component
is used in an environment for which it was not originally
designed.
One of the basic problems when developing component-based
systems is that it is difficult to keep track of components
and their interrelationships. This is particularly
problematic when upgrading components. One way to maintain
control over upgrades is to use component identification
and dependency analysis. These are well known techniques
for managing system configurations during development, but
are rarely applied in managing run-time dependencies. The
main contribution of this thesis is to show how
Configuration Management (CM) principles and methods can be
applied to component-based systems.
This thesis presents a method for analysing dependencies
between components. The method predicts the influence of a
component update by identifying the components in a system
and constructing a graph describing their dependencies.
Knowledge of the possible influences of an update is
important, since it can be used to limit the scope of
testing and be a basis for evaluating the potential damage
of the update. The dependency graphs can also be used to
facilitate maintenance by identifying differences between
configurations, e.g., making it possible to recognise any
deviations from a functioning reference configuration.
For evaluation of the method, a prototype tool which
explores dependencies and stores them under version control
has been developed. The prototype has been used for partial
analysis of the Windows 2000 platform, for which it has
been important to remain aware of dynamic dependencies.
Preliminary experiments indicate that most components have
only a few dependencies. The method has thus given an
indication that the analysis of the effects of component
updates may not be as difficult as might be expected.}
}
@PhDThesis{ itlic:2000-006,
author = {Gustaf Naeser},
title = {A Flexible Framework for Detection of Feature Interactions
in Telecommunication Systems},
school = it,
department = docs,
year = 2000,
number = {2000-006},
type = {Licentiate thesis},
month = oct,
abstract = {The complexity of today's telecommunications systems grows
with each new feature introduced. As the number of services
an operator provides can be used to gain advantage over
competitors the number of services will continue to
increase. As the complexity grows, so does the possibility
for feature interactions, situations where the operation of
features interfere with the operation of other features.
Interactions cost more to resolve the later during the
introduction of a new feature they are detected. This makes
the need for analysis of feature interactions during
development preeminent.
There exists a multitude of frameworks and techniques for
specification and analysis of models of telecommunications
systems. However, we have identified that they often impose
unnecessary design decisions on the models, making them
untoward.
In this thesis we propose a framework for specification of
models of telecommunications systems. The framework is
designed to describe them as general systems of
communicating processes in a flexible way which allows
alterations to be made late during the design. We also
present definitions of interactions and how the interaction
definitions are used in the framework to detect undesired
interactions.
A model for telephony systems is derived through
observations made of common telephony concepts and
constructs. Delving into concepts early in the design of
the system is shown to avoid several sources of
interactions.
To demonstrate the potential of the framework we present a
case study where the system and services described in the
first interaction detection contest has been implemented
and searched for interactions. }
}
@PhDThesis{ itlic:2000-005,
author = {Fredrik Edelvik},
title = {Finite Volume Solvers for the {M}axwell Equations in Time
Domain},
school = it,
department = tdb,
year = 2000,
number = {2000-005},
type = {Licentiate thesis},
month = oct,
abstract = {Two unstructured finite volume solvers for the Maxwell
equations in 2D and 3D are introduced. The solvers are a
generalization of FD-TD to unstructured grids and they use
a third-order staggered Adams--Bashforth scheme for time
discretization. Analysis and experiments of this time
integrator reveal that we achieve a long term stable
solution on general triangular grids. A Fourier analysis
shows that the 2D solver has excellent dispersion
characteristics on uniform triangular grids. In 3D a
spatial filter of Laplace type is introduced to enable long
simulations without suffering from late time instability.
The recursive convolution method proposed by Luebbers et
al. to extend FD-TD to permit frequency dispersive
materials is here generalized to the 3D solver. A better
modelling of materials which have a strong frequency
dependence in their constitutive parameters is obtained
through the use of a general material model. The finite
volume solvers are not intended to be stand-alone solvers
but one part in two hybrid solvers with FD-TD. The
numerical examples in 2D and 3D demonstrate that the hybrid
solvers are superior to stand-alone FD-TD in terms of
accuracy and efficiency. }
}
@PhDThesis{ itlic:2000-004,
author = {Anders Wall},
title = {A Formal Approach to Analysis of Software Architectures
for Real-Time Systems},
school = it,
department = docs,
year = 2000,
number = {2000-004},
type = {Licentiate thesis},
month = sep,
note = {Also published as report MRTC 00/21 at M{\"a}lardalens
h{\"o}gskola},
abstract = {A software architecture is a high-level design description
of a software system. In terms of the architecture, early
design decisions can be analyzed to improve the quality of
a real time software system, which depends very much on how
it is structured rather than how it is implemented.
Architectural analysis techniques vary in their degree of
formality. The least formal is based on reviews and
scenarios, whereas the most formal analysis methods are
based on mathematics. In this thesis, we propose to use a
formal approach to software architectural analysis. We use
networks of timed automata to model the architecture of
real time systems and transform architectural analysis
problems to reachability problems that can be checked by
the existing tools for timed automata. The typical
properties that can be handled using this approach are
schedulability and safety properties. As the first
technical contribution, we extend the classic model of
timed automata with a notion of real time tasks. This
yields a general model for real-time systems in which tasks
may be periodic and non-periodic. We show that the
schedulability problem for the extended model can be
transformed to a reachability problem for standard timed
automata, and thus it can be checked by existing
model-checking tools, e.g. UPPAAL for timed automata. As
the second contribution, we present a method to check
general high level temporal requirements e.g. timing
constraints on data flowing in multi-rate transactions,
which can not be handled by traditional approach to
schedulability analysis. We have developed an algorithm
that given a data dependency model and a schedule for a
transaction constructs a timed automaton describing the
behavior of the transaction. Thus, by using existing
verification tools we can verify that a given architecture
is schedulable and more importantly, it is correctly
constructed with respect to the high level temporal
constraints. }
}
@PhDThesis{ itlic:2000-003,
author = {Fredrik Larsson},
title = {Efficient Implementation of Model-Checkers for Networks of
Timed Automata},
school = it,
department = docs,
year = 2000,
number = {2000-003},
type = {Licentiate thesis},
month = may,
abstract = {Since real-time systems often operate in safety-critical
environments it is extremely important that they function
correctly. \textsc{uppaal} is a tool that can be used for
validation and verification of real-time systems. The user
models the system using networks of timed automata and uses
a simple logic to express safety requirements that the
modelled system must satisfy to guarantee its correct
behaviour. \textsc{uppaal} then performs reachability
analysis using constraint solving techniques to check if
the model satisfies the given requirements. In addition,
the tool is also able to provide the user with a sample
execution that explains why a requirement is (or is not)
satisfied by the model. The analysis is fully automated.
This thesis describes various techniques adopted when
implementing \textsc{uppaal}. Some of the techniques have
improved the performance of \textsc{uppaal} significantly.
We have studied the techniques with performance
measurements in several case-studies. One of the main
contributions is the comparison of different strategies in
implementing the basic data structures and searching
algorithms. The measurements can be used as hints on what
parts of the model-checker that are most important to
optimise. Though the techniques are studied in the context
of timed automata, we believe that they are applicable to
the implementation of general software tools for automated
analysis. }
}
@PhDThesis{ itlic:2000-002,
author = {Susanne Remle},
title = {Modeling and Parameter Estimation of the Diffusion
Equation},
school = it,
department = syscon,
year = 2000,
number = {2000-002},
type = {Licentiate thesis},
month = may,
abstract = {In many applications such as heat diffusion and flow
problems, it is of interest to describe the process
behavior inside a particular medium. An example can be the
strive for estimating certain parameters related to the
material. These processes are often modeled by a partial
differential equation. Certain methods for identifying
unknown material constants require the model to be of
finite order. This thesis describes how the diffusion
process can be approximated with a finite order model, and
how the accuracy of an estimated model depends on the model
order. In particular, a detailed analysis is carried out
for the case when the approximate model accounts for
solving the diffusion by a difference method. }
}
@PhDThesis{ itlic:2000-001,
author = {Katarina Boman},
title = {Low-Angle Estimation: Models, Methods and Bounds},
school = it,
department = syscon,
year = 2000,
type = {Licentiate thesis},
number = {2000-001},
month = feb,
abstract = {In this work we study the performance of elevation
estimators and lower bounds on the estimation error
variance for a low angle target in a smooth sea scenario
using an array antenna.
The article is structured around some key assumptions on
multipath knowledge, signal parameterization and noise
covariance, giving the reader a framework in which
Maximum-Likelihood estimators exploiting different a
priori information can be found.
The crucial factor that determines the estimator accuracy
is the multipath modeling, and there are three alternative
levels of knowledge that can be used: 1) two unknown target
locations 2) the target and its corresponding
sea-reflection are related via simple geometry 3) the sea
reflection coefficient is known as a function of grazing
angle.
A compact expression for the Cram{\'e}r-Rao lower bound is
derived, including all special cases of the key
assumptions. We prove that the Cram{\'e}r-Rao bound is
highly dependent on the multipath model, while it is the
same for the different signal parameterizations and that it
is independent of the noise covariance.
However, the Cram{\'e}r-Rao bound is sometimes too
optimistic and not achievable. The tighter Barankin bound
is derived to predict the threshold behavior seen at low
SNR. At high SNR the Barankin bound coincides with the
Cram{\'e}r-Rao bound. Simulations show that the Maximum
Likelihood methods are statistically efficient and achieve
the theoretical lower bound on error variance, in case of
high enough SNR.
The bounds are also useful tools to design an improved
array structure that can give better performance than the
standard uniform linear array structure. The influence of
the number of sensors and the number of snapshots on the
error variance is also studied, showing the rate of
improvement with more sensors or snapshots. Finally we
discuss the use of multiple frequencies, which is mainly a
tool for suppressing ambiguities. We show for which signal
models it provides improved performance. }
}