&2/#>+53=+=3?/$/98;=&2/#>+53=+=3?/$/98;=
(85>6/ >6,/; 8@&8;=3-5/

..;/<<371=2/2+55/71/<80!7537/(3./87+5B<3<37#>+53=+=3?/..;/<<371=2/2+55/71/<80!7537/(3./87+5B<3<37#>+53=+=3?/
%=>.3/<)8;4/.A+695/0;86869>=+=387+5&2374371%=>.3/<)8;4/.A+695/0;86869>=+=387+5&2374371
$/</+;-2$/</+;-2
2+;//7%7/5<87
83</%=+=/'73?/;<3=B
-<7/5<87,83</<=+=//.>
+C23*+71
83</%=+=/'73?/;<3=B
.+C23B+71,83</<=+=//.>
&8;;/7-/&/695/
83</%=+=/'73?/;<3=B
=8;B=/695/>,83</<=+=//.>
8558@=23<+7.+..3=387+5@8;4<+=2==9<7<>@8;4<78?+/.>=:;
"+;=80=2/>;;3->5>6+7.7<=;>-=38786687<5/6/7=+;B.>-+=38786687<+7.=2/!7537/
+7.3<=+7-/.>-+=38786687<
$/-866/7./."3=+=387$/-866/7./."3=+=387
%7/5<87*+71&/695/&..;/<<371=2/2+55/71/<80!7537/(3./87+5B<3<37
#>+53=+=3?/%=>.3/<)8;4/.A+695/0;86869>=+=387+5&2374371$/</+;-2
&2/#>+53=+=3?/$/98;=


2==9<.838;1
&23<8@&8;=3-5/3<,;8>12==8B8>08;0;//+7.89/7+--/<<,B=2/&2/#>+53=+=3?/$/98;=+= %')8;4<=2+<
,//7+--/9=/.08;37-5><38737&2/#>+53=+=3?/$/98;=,B+7+>=28;3C/.+.6373<=;+=8;80 %')8;4<8;68;/
3708;6+=38795/+</-87=+-=7<>@8;4<78?+/.>
..;/<<371=2/2+55/71/<80!7537/(3./87+5B<3<37#>+53=+=3?/%=>.3/<..;/<<371=2/2+55/71/<80!7537/(3./87+5B<3<37#>+53=+=3?/%=>.3/<
)8;4/.A+695/0;86869>=+=387+5&2374371$/</+;-2)8;4/.A+695/0;86869>=+=387+5&2374371$/</+;-2
,<=;+-=,<=;+-=
7=23<9+9/;@/<2+;/8>;+99;8+-2+7.=2/9;8-/<<08;:>+53=+=3?/+7+5B<3<8087537/?3./8.+=+;/-8;./.
.>;371+7+0=/;<-2885;8,8=3-<9;81;+6=2+=/692+<3C/.-869>=+=387+5=2374371&!7537/;/</+;-2
<=;+=/13/<6+B,/7/-/<<+;B08;?+;38><;/+<87<<>-2+<@2/7@8;4371@3=2+1/81;+923-+55B.3<=;3,>=/.
;/</+;-2=/+6@2/7-87.>-=371;/</+;-2@3=2<=>./7=<37+787537/9;81;+68;@2/7;/<8>;-/<+;/
37+--/<<3,5/.>/=8-+69><-58<>;/<534/=28<//A9/;3/7-/..>;371=2/!(9+7./63-)/08558@/.
+=2;//<=+1/9;8-/<<.>;371:>+53=+=3?/+7+5B<3<80=2/?3./8<=2+=37-5>./.95+77371+7.</=>987537/
+7+5B<3<80?3./8<+7.<=;>-=>;+5-8.371806/68<=8/A958;/9+==/;7<+-;8<<=2/.+=+7+5B<3<@+<
-87.>-=/.@3=2+-86,37+=38780=/-2785813/<37-5>.3718815/;3?/08;-855+,8;+=3?/-8.37187537/+7.
(3?8=8-855+=/+7.<>66+;3C/D7.371<&2/6/=28.<+7.9;8-/<<@/./<-;3,/+;/;/+.35B+9953-+,5/=8
8=2/;;/</+;-2<=>.3/<=2+=37-5>./?3./8+<9+;=80=2/.+=+</=
/B@8;.</B@8;.<
87537/?3./8:>+53=+=3?/?3./8+7+5B<3<-869>=+=387+5=2374371
;/+=3?/86687<3-/7</;/+=3?/86687<3-/7</
&23<@8;43<53-/7</.>7./;+;/+=3?/86687<==;3,>=387 87-866/;-3+5%2+;/534/7=/;7+=387+5
3-/7</
&23<28@=8+;=3-5/3<+?+35+,5/37&2/#>+53=+=3?/$/98;=2==9<7<>@8;4<78?+/.>=:;?853<<
The Qualitative Report 2021 Volume 26, Number 6, 1974-1988
https://doi.org/10.46743/2160-3715/2021.4734
Addressing the Challenges of Online Video Analysis in
Qualitative Studies: A Worked Example from Computational
Thinking Research
Chareen Snelson, Dazhi Yang, and Torrence Temple
Boise State University, USA
In this paper, we share our approach and the process for qualitative analysis of
online video data recorded during an after-school robotics program that
emphasized computational thinking (CT). Online research strategies may be
necessary for various reasons such as when working with a geographically
distributed research team, when conducting research with students in an online
program, or when resources are inaccessible due to campus closures like those
experienced during the COVID-19 pandemic. We followed a three-stage
process during qualitative analysis of the videos that included planning and
setup, online analysis of videos, and structural coding of memos to explore
patterns across the data. Analysis was conducted with a combination of
technologies including Google Drive for collaborative coding online and NVivo
to collate and summarize findings. The methods and process we describe are
readily applicable to other research studies that include video as part of the data
set.
Keywords: online video, qualitative video analysis, computational thinking
Introduction
Video in qualitative research is valuable as an audiovisual record of everyday events,
such as classroom interactions, that can be closely reviewed, replayed, and analyzed repeatedly
during an investigation. How video is used in qualitative research depends on factors such as
the goals of the study, experience of the research team, and how technologies for recording,
file management, and data analysis are selected and used (Derry et al., 2010; Fitzgerald et al.,
2013; Heath et al., 2010). In some studies, it may become necessary to perform some or all of the
research activities online, such as when research personnel are geographically distributed, when
conducting research with students enrolled in online programs, or when face-to-face research
is not feasible for some reason. A recent example is the COVID-19 pandemic, which drove
heightened interest in how to collect data online using videoconferencing applications when it
was more difficult to conduct research in person (Gray et al., 2020; Lobe et al., 2020).
Qualitative analysis of video files in an online space can be challenging given that technologies
designed for qualitative analysis, such as Computer Assisted Qualitative Data Analysis
Software (CAQDAS), vary in functionality and may have limited options for analysis of video
files stored online (Silver & Lewins, 2014; Silver & Patashnick, 2011). The aim of this
methodological paper is to share how we conducted video analysis online and what was learned
through our research experience and to promote discussion about strategies for research
involving online video data.
Video in Qualitative Educational Research
Video has an established role in qualitative and social research practice as a medium
through which an audiovisual record of everyday events, such as classroom interactions, can
be preserved and made available for close scrutiny, replay, and repeated analysis during an
investigation (Heath et al., 2010). In educational research, video has been conceptualized as
offering a window through which classroom interactions can be viewed, a lens to focus on
certain aspects of classroom activity, or a mirror for reflecting on teaching, learning, or research
practice (Clarke & Chan, 2019). The benefits of preserving those fleeting aspects of classroom
practice in video for research purposes are compelling, but also challenging due to existing
technologies and how they support or constrain our work, how much those technologies cost,
and how difficult they are to use. Over the years, film and video technologies have evolved
from bulky, complicated, and expensive systems to more affordable digital technologies that
are convenient to use in research endeavors. Erickson’s (2011) brief history of video in social
research offers a concise overview spanning several decades and illustrates how technologies
evolved and were integrated in research practice. Many of the early technologies were
cumbersome and weighty. For example, in a 1967 research study, Erickson used a 25-pound
camera that required “reels of tape that were an inch thick and about 16 inches in diameter” (p.
181). Now, smartphones, equipped with high-definition video cameras, are increasingly
common in society (Silver, 2019). The widespread availability of affordable portable digital
video technologies is advantageous for researchers who need quick and easy access to video
recording technologies.
The growing ubiquity of digital video recording technologies is helpful, but equipment
is not the only factor to consider when integrating a video component in research.
Researchers are faced with a variety of practical and methodological decisions when recording
video, managing video files, and analyzing video data (Derry et al., 2010). Heath et al. (2010)
described some of the challenges associated with acquiring video data that include gaining
access to the location where recording will occur, obtaining necessary permissions, deciding
how much video to record, identifying camera positioning and framing, and determining the
influence of the camera on participants. Similarly, Fitzgerald et al. (2013) drew on experience
with a video ethnography of science teaching practices to unpack how issues with sampling
(e.g., camera placement and what to capture on video), authenticity (e.g., influence of the
camera on participant interactions), and ethics (e.g., confidentiality of recorded participants)
impact the collection of video data. Luff and Heath (2012) delved into camera placement even
further by examining several methods for camera positioning such as the roving camera that
moves around the scene, use of stationary fixed cameras placed in strategic positions to capture
different angles of the scene, use of wide-angle lenses to capture more of the scene, mid-shot
recording to focus on small groups, and multi-camera approaches to record the scene from
different perspectives. Each approach has benefits and disadvantages pertaining to logistics or
feasibility, intrusiveness in the situation being recorded, or how the recording is framed by the
camera position to reveal or conceal naturally occurring events and interactions.
Video data collection in online settings is inherently different when participants are
physically separated. It is no longer necessary to carry equipment into a physical classroom
and determine where to position equipment or what camera angles to use. However, other tools,
such as videoconferencing systems, are needed to facilitate the process of acquiring video data
or observing online classroom interactions in real time. For example, Berry (2017) used Adobe
Connect videoconferencing software to record video of synchronous online class meetings
during a qualitative case study of community building in online doctoral education. Saltz and
Heckman (2020) used video breakout rooms in their case study of an instructional strategy,
called a Structured Paired Activity, that put online students into two-member teams while
completing programming activities. Observations were conducted within the online video
breakout rooms to document naturally occurring behaviors and develop new insights about
team roles and processes. Ho (2019) applied a novel approach for acquiring video as part of a
study of online language learning. Participants were asked to install Camtasia screen recording
software and record video while completing an online lesson. Participants verbalized their
experiences in a think-aloud approach while recording their computer screen and webcam. The
video files were uploaded by the participants to a secure, password-protected Dropbox drive
so that they could be accessed by the researcher for analysis. This approach offers participants
more control over the video they share, but also requires technical support for software
installation or transfer of large data files.
Qualitative Analysis of Video
Strategies for analysis of videos in qualitative research include indexing the videos to
develop a content log with time codes and descriptions of video content, segmenting videos
into events or clips relevant to the research questions, transcribing audio and visual content
contained in the videos, and conducting cycles of review to code, compare, and engage in fine-
grained analysis (Derry et al., 2010; Heath et al., 2010; Knoblauch & Tuma, 2020). These
strategies require the use of technology to facilitate the process of working with digital video
files. Researchers might choose software specifically designed for audiovisual analysis or use
CAQDAS programs that support analysis of multiple types of data including video (Estrada &
Koolen, 2018). However, software tools for video analysis differ in their functionality and
suitability for handling the research tasks associated with audiovisual data (Silver &
Patashnick, 2011). When selecting technologies for qualitative video analysis, researchers will
find it prudent to consider factors such as functionality, cost, ease of use, and whether the chosen
tools will do the job required to complete analysis. The situation is complicated when working
with online videos as a data source since CAQDAS tools often have limited support for online
data analysis (Silver & Bulloch, 2017). Desktop CAQDAS software opens linked video files
saved on the same computer, rather than online files, although there are some limited
exceptions. NVivo (QSR International, 2020) has a web browser extension called NCapture
that can be used to sample online social media data including YouTube videos and comments.
The videos remain on YouTube and display within the NVivo software. Analysis (i.e., coding)
can be done directly on the media timeline or on a synchronized transcript. At the time of this
writing, this functionality only works on computers running the Windows operating system
and is limited to YouTube videos.
Various problems surface when attempting to work collaboratively online during
qualitative analysis of video. Challenges include determining how to work collaboratively
online and how to manage video files during the process. With respect to the use of CAQDAS
in an online space, Silver and Bulloch (2017) have argued that there are three primary
approaches that researchers might choose: (a) work independently and merge copies of the
CAQDAS project files, (b) work on multi-user projects on a networked server, or (c) use an
online package that works directly through a web browser. Unfortunately, these strategies may
not work as effectively when analyzing online video content. In desktop CAQDAS programs,
links to video files saved on the same computer break when either the project
file or the media files are moved. Alternatively, if the videos are saved on a secure online drive,
it may not be possible to link directly to them from the desktop software. Downloading or
moving the files to another location for analysis may be tedious, time consuming, or possibly
restricted due to security concerns and the need to protect research participants recorded in the
videos. This is challenging for researchers who need to store large video files online such as
when working with a distributed research team who upload files from different sites or when
recording videos directly from an online classroom.
A possible solution to the dilemma of online qualitative video analysis is to use a
combination of technologies and methodologies that are strategically selected to meet the
specific requirements of the research project. The use of a “technology mashup” that includes
Web 2.0 technologies has been suggested as an approach where multiple technologies are used
to combine their relative advantages (Davidson et al., 2016, p. 608). Some of the affordances
of Web 2.0 technologies that have been identified as potentially valuable in qualitative research
include data storage, data organization and management, linking, commenting, annotating,
note taking, and collaboration (Davidson & di Gregorio, 2011; Silver & Bulloch, 2017).
Unfortunately, there has been a noted lack of discussion in the research literature about the
combination of technologies and methodologies to inform analysis of video and other
audiovisual data (Silver & Patashnick, 2011; Smith et al., 2016).
An Example from Computational Thinking Research
In this section, we describe a real-world situation where we grappled with the
challenges of conducting qualitative analysis of videos that were recorded in a classroom
setting and then uploaded to an online drive. The videos were recorded during a case study of
computational thinking (CT) practices in an eight-week after-school program for children in
fourth through sixth grades. The purpose of the after-school program, where the videos were
recorded, was to promote problem solving and CT during the process of building,
programming, and testing robots, built from Lego® Mindstorms® kits, that would navigate a
simulated Mars terrain and locate water (Yang et al., 2021). Participating students worked in
small teams of three under the guidance of two teachers, a former NASA astronaut, three
graduate research assistants, and several university faculty members (typically one to three
faculty per session). The program was part of a larger National Science Foundation (NSF)
funded project created to develop CT and an understanding of Science, Technology,
Engineering, and Math (STEM) subjects. Institutional Review Board (IRB) approval was
obtained prior to conducting the study on site and additional approval was obtained prior to
data analysis when additional research personnel joined the study.
Three groups of students, including six boys and three girls, were recorded during all
sixteen sessions of the program over eight weeks, with two sessions per week, while they
completed the activities. Each group was followed by a graduate research assistant who
recorded their respective group with a handheld video camera. Video recordings from each
session were uploaded to a secure password-protected Google Drive folder where they were
organized into folders by week and session. After the program had concluded, the Principal
Investigator (PI) for the project recruited another faculty member with expertise in qualitative
data analysis and a doctoral candidate with a background in educational technology and science
education to assist with analysis of the videos. The three members of this team brought different
perspectives and areas of expertise to the analysis phase. The PI had expertise in CT and was
involved in all facets of the project. The other two members of this investigation came in later
with a fresh perspective and an interest in examining how CT might be identified in “the wild”
of a classroom when students were engaged in the process of learning. As experienced
educators, we wondered how teachers might identify CT when they saw it happening during a
busy and sometimes chaotic project-based program.
The research question guiding the qualitative analysis of video was: How do the
students demonstrate CT through problem solving activities guided by project-based learning?
CT has been described as a fundamental skill that “involves solving problems, designing
systems, and understanding human behavior, by drawing on the concepts fundamental to
computer science” (Wing, 2006, p. 33). CT has been lauded as essential for everyone and
beneficial in nearly all disciplines (Bundy, 2007; Wing, 2006, 2008). Yet, consensus has not
been fully reached on the core attributes of CT, what it looks like in practice, or how to assess
its potential benefits even though efforts have been made toward this goal. One operational
definition for CT that emphasizes problem solving processes and dispositions or attitudes was
developed in a collaborative effort with the International Society for Technology in Education
(ISTE), the Computer Science Teachers Association (CSTA), and leaders from higher
education, K-12, and industry sectors (ISTE & CSTA, 2011). Others have defined CT in
different ways based on components such as programming processes, abstraction, data
processing, symbolic representation, algorithms, conditional logic, problem solving, and
computing practice (Grover & Pea, 2013). CT is considered important, but the field remains
challenged by its diversity of definitions, models, instructional strategies, and assessments
(Cutumisu et al., 2019; Hsu et al., 2018; Shute et al., 2017; Tang et al., 2020).
The task of observing instances of CT in the videos of the after-school program was
complicated due to the lack of consensus about what CT looks like in K-12 practice and the
paucity of qualitative CT research that integrated video recordings from a naturalistic learning
environment. Relevant studies were difficult to find and provided limited information to inform
methodological decisions for qualitative video analysis. Only a few studies were identified that
shed light on how researchers have approached the task of qualitative research with video data
for research involving CT in a K-12 setting (Bowden, 2019; Hadad et al., 2020; Israel et al.,
2017; Jordan & McDaniel, 2014; Sullivan et al., 2016). These studies offered some insights
into how one might use video as part of a CT study in a naturalistic setting, but often left out
details about decisions for data management or analysis. Underreporting of methods is a noted
problem for studies involving analysis of qualitative visual data (Smith et al., 2016).
Furthermore, the methodological issues of online video have not been clearly addressed, leaving
us to adapt what is known about qualitative video analysis when conducting research in a
collaborative online context.
In the next section of this paper, we share our approach and the process for online
qualitative analysis of video data that was recorded in an after-school robotics program for
upper elementary school children. The video files were uploaded to a secure online drive where
they remained during analysis due to the practicalities of working with a geographically
distributed research team and requirements stipulated by the local institutional review board.
Three-Stage Analysis of Online Video
We followed a three-stage process during qualitative analysis of the videos from the
after-school robotics program that included planning and setup, online analysis of videos, and
structural coding of memos to explore patterns across the data.
Planning and Setup
The first step was to plan the analysis procedure and set up the structure for online video
analysis. The video data set included 128 individual video files that ranged in duration from 4
seconds to 59 minutes and 50 seconds. The total amount of video was 61 hours, 38 minutes,
and 54 seconds. Our university uses the Google suite of tools that comes bundled with email
and online applications such as Google Drive for file storage and Google apps for creating
online documents, spreadsheets, or presentations. Video files had been uploaded to a secure
password-protected Google Drive where they were organized in a nested online file system.
This included a folder for each of the eight weeks of the after-school program and subfolders
for each session (2 sessions per week). Within each session folder there were additional folders
that stored the video files recorded by each of the three graduate research assistants who
recorded and uploaded the videos separately. Each research assistant had followed and
recorded a different group of students on site so that we were able to observe the work of three
teams with three students per team. It was necessary to work with the videos directly in Google
Drive to adhere to IRB requirements for secure data storage and to facilitate work done by a
geographically distributed team that had members living in two different states in the USA. In
addition, analysis occurred during the COVID-19 pandemic when access to campus was
restricted and research endeavors were conducted online, whenever possible.
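As a concrete illustration of this organizational scheme, the short Python sketch below enumerates the nested week/session/recorder structure described above. The folder names (e.g., Week_1/Session_1/Recorder_A) are hypothetical placeholders, since the exact naming convention used on the project's Google Drive is not reported here.

```python
# A minimal sketch of the nested folder scheme described above; the
# Week/Session/Recorder names are hypothetical placeholders.
from itertools import product

WEEKS = range(1, 9)          # eight weeks of the after-school program
SESSIONS = range(1, 3)       # two sessions per week (16 sessions total)
RECORDERS = ["A", "B", "C"]  # three graduate research assistants

def folder_paths():
    """Yield the expected Drive folder path for each recorder's videos."""
    for week, session, recorder in product(WEEKS, SESSIONS, RECORDERS):
        yield f"Week_{week}/Session_{session}/Recorder_{recorder}"

for path in folder_paths():
    print(path)  # 8 weeks x 2 sessions x 3 recorders = 48 leaf folders
```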
The university provided licenses for CAQDAS software (i.e., NVivo), but technical
challenges prohibited the use of this desktop software for online analysis of video files stored
in Google Drive. At the time of analysis, we were unable to locate a CAQDAS program that
would do exactly what we needed, which was to view, code, and transcribe video in the
software without downloading the video files from the secure online drive. Therefore, we set
up an online system using Google Drive apps (e.g., Sheets and Docs) to manage analysis of the
video files. We started by creating a matrix in Google Sheets to organize linked data files and
track where CT was observed in the videos (Miles et al., 2020). A partial screenshot showing
the layout of the matrix in Google Sheets is shown in Figure 1 with identifying information
removed.
Figure 1
Screenshot of Matrix in Google Sheets
The rows of the spreadsheet contained entries for individual video files that were
organized by week, session, and source (i.e., the person who recorded the video). Columns in the
spreadsheet were labeled with elemental components of CT that we identified and defined
based on descriptions available in the literature (An & Lee, 2014; Grover & Pea, 2013; Wing,
2006; Yadav et al., 2011). The CT components we looked for in the videos included abstraction
(reducing complexity), decomposition (breaking problems into components), algorithms
(structuring a sequence of steps), automation (using programs to automate a process), heuristics
(using problem-solving strategies), conditional logic (applying constructs such as if-then-else),
vocabulary and terminology (using CT terminology such as variables and rotation), data
collection (gathering data), data structures (looking for patterns), simulation and modelling
(testing simulated models), and reporting and communication (documenting and presenting).
Definitions of each CT component were added to the matrix spreadsheet for easy access and
review during video analysis. A rating scale (1 to 3) was used to apply magnitude coding, as
described later in this paper.
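This structure lends itself to a simple tabular representation. The sketch below models the matrix in Python, with one row per video file and one column per CT component; the column names and the example values are illustrative, not the study's actual headers or data.

```python
# A sketch of the analysis matrix: one row per video file, one column per
# CT component, with a 1-3 magnitude rating (or blank) in each cell.
# Column names and the example row are illustrative, not study data.
import csv

CT_COMPONENTS = [
    "abstraction", "decomposition", "algorithms", "automation",
    "heuristics", "conditional_logic", "vocabulary", "data_collection",
    "data_structures", "simulation_modeling", "reporting_communication",
]
header = ["week", "session", "source", "video_file", "memo_link"] + CT_COMPONENTS

# Example entry: a rating of 3 for heuristics observed in one video;
# components not observed in that file are simply left blank.
row = {"week": 1, "session": 2, "source": "Recorder_A",
       "video_file": "team1_build.mp4", "memo_link": "https://docs.google.com/...",
       "heuristics": 3}

with open("ct_matrix.csv", "w", newline="") as f:
    writer = csv.DictWriter(f, fieldnames=header)
    writer.writeheader()
    writer.writerow(row)  # missing keys are written as empty cells
```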
In addition to the matrix spreadsheet, we used Google Docs to establish an online memo
system to record observations and generate a content log of the videos (Derry et al., 2010;
Heath et al., 2010; Knoblauch & Tuma, 2020). Individual memos were created for each
recorded session of the after-school program and included the date of analysis, which video
file was reviewed, and who completed the analysis. This was followed by a table where time
codes, descriptions of video content, screenshots from the videos, transcripts, and analytic
notations could be typed. A screenshot of the memo template is shown in Figure 2. Each memo
document was linked from its associated video entry in the matrix spreadsheet so that we could
easily access and review the information. We also created a set of online journals that included
a project journal to record key events in the research process and individual researcher journals
to reflect on our observations while analyzing the videos from each week of the after-school
program. Since the work was done online, we were able to review each other’s work at any
time. We met online through videoconferencing at the beginning, midpoint, and end of analysis to
discuss the process and what we were observing in the videos.
Figure 2
Memo Template for Video Observations
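To show how such a memo might be modeled as data, here is a small sketch of the memo structure: a header per recorded session plus a table of time-coded entries. The field names paraphrase the template in Figure 2, and all values are invented for illustration.

```python
# A sketch of the session memo structure; field names paraphrase the
# Google Docs template in Figure 2 and all values are illustrative.
from dataclasses import dataclass, field

@dataclass
class MemoEntry:
    start: str            # time code where the segment begins, e.g., "00:03:12"
    end: str              # time code where the segment ends
    description: str      # what is happening in the video
    transcript: str = ""  # transcribed speech, if any
    notes: str = ""       # analytic notations (e.g., CT component observed)

@dataclass
class SessionMemo:
    analysis_date: str
    video_file: str
    analyst: str
    entries: list[MemoEntry] = field(default_factory=list)

memo = SessionMemo("2020-06-01", "team1_build.mp4", "Researcher 1")
memo.entries.append(MemoEntry(
    "00:03:12", "00:11:47",
    "Team modifies its program and re-tests the robot",
    notes="heuristics (trial-and-error); autonomy level 3"))
```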
Online Analysis of Videos
Video analysis was conducted entirely online and involved three key processes: (a)
Observing: watching the videos to observe behaviors or speech that matched CT component
definitions; (b) Memoing: writing memos and journals to document video content and our
interpretation of it; and (c) Magnitude Coding: applying a first-cycle qualitative coding process
using a magnitude coding approach (Miles et al., 2020; Saldaña, 2016). These processes
occurred simultaneously during observation of the video content as described below.
Observing
We watched the videos to observe what was happening in the classroom and how teams
of students were interacting during the robotics activities. We looked for observable evidence
of CT in student behavior or speech that corresponded to the definitions of CT components we
had derived from the literature. For example, we observed students applying a trial-and-error
heuristic when developing and testing their robots. They were programming the assembled
robots with the Lego® Mindstorms® EV3 software program. We watched them engage in
rapid problem-solving cycles where they would modify their programming, download it to the
robot, and then test the robot to see what happened. Then, they would quickly go back to the
computer, modify the program, and test again. This behavior corresponded to the CT
component definition of a heuristic as an experience-based strategy that facilitates problem
solving, such as trial-and-error (Yadav et al., 2011). The strategy we used to identify CT based
on observable behavior recorded in the videos was similar to Bene’s (2014) study where videos
of students were analyzed to look for verbal and nonverbal examples of metacognitive thinking
such as “exclamations of satisfaction, delight, dismay or frustration and nonverbal expressions
such as body language, eye gaze, gestures, pointing, and posture” (p. 6).
Memoing
Observations from the videos were recorded in the online memos (described in the
planning and setup section above) where we wrote detailed descriptions of the activities,
transcribed conversations, and noted observable evidence of CT. Screenshots of activities,
student journals, robotic equipment, laptop computer screens, the classroom setting, and the
simulated Mars environment were captured from the videos and included in the memos next to
the written observations. The memos were designed to provide a detailed account of visual and
audio information observed in the videos consistent with transcription approaches described
for audiovisual media (Evers, 2011; Silver & Patashnick, 2011).
Individual classroom activities, such as discussions or programming exercises, were
used as natural transition points to segment the videos into meaningful episodes where we
could look at how CT was exhibited based on activity type. Time codes where each activity
started and ended were also included in the memos together with the activity descriptions so
that video segments could be located easily (Knoblauch & Tuma, 2020). The approach of
segmenting classroom videos has been suggested as a strategy for identifying episodes that
capture phenomena of interest (Derry et al., 2010; Heath et al., 2010). A recent example is
found in Krist’s (2020) three-year longitudinal study conducted to examine classroom
community in the context of middle school science education where the primary data source
was video. The video corpus was reduced into meaningful segments “by selecting episodes of
science knowledge building activity during each recorded class period” (p. 425). The practice
of segmenting video helps to manage a large video data set and parse out the sections that
pertain directly to the purpose of the study.
Magnitude Coding
Magnitude coding is a qualitative analysis technique used to indicate “intensity,
frequency, direction, presence, or evaluative content” (Miles et al., 2020, p. 71). When an
episode of CT was observed in the video, it was noted in the memo and marked in the matrix
spreadsheet under the appropriate CT column with a rating of 1, 2, or 3 to indicate the level of
autonomy exhibited by the students. Level 1 ratings were used for teacher-led activities such
as whole-class discussions or watching a video. Level 2 ratings were used for one-on-one or
small group work sessions where learners were actively engaged in problem solving, building
robots, or programming with coaching from a mentor or teacher. Level 3 ratings were used
when learners were working independently, either alone or in small teams, with minimal or no
support from a teacher or mentor. Magnitude coding added a dimension beyond simply
marking where CT was observed by indicating the level of autonomy exhibited in observable CT
behaviors. Recorded in the matrix spreadsheet, these codes provided a way to track
growth over time in a concise visual display.
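As a sketch of how such codes support tracking growth over time, the snippet below averages magnitude codes by week. The ratings are invented for illustration, and the column names reuse the hypothetical matrix fields from the earlier sketch.

```python
# A sketch of summarizing magnitude codes to track growth in autonomy
# over the eight weeks; the ratings below are illustrative, not study data.
import pandas as pd

ratings = pd.DataFrame({
    "week":       [1, 1, 2, 2, 8, 8],
    "heuristics": [1, 2, 2, 2, 3, 3],
    "algorithms": [1, 1, 2, 3, 3, 3],
})

# Mean magnitude per week and CT component: rising values would indicate
# students exhibiting CT with increasing independence.
weekly = ratings.groupby("week")[["heuristics", "algorithms"]].mean()
print(weekly)
```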
Structural Coding of Memos
The next stage of analysis was structural coding of the analytic memo text (Miles et al.,
2020; Saldaña, 2016). Structural coding is used to group or categorize similar types of content
for further analysis. The purpose of this stage was to collate related information and compare
notes about activities, CT components, and the rationale for magnitude coding written in the
memos. Multiple memo files, as Google Docs, had been created for each session of the
eight-week after-school program. The tools within Google Drive did not readily support the
process of collating groups of similarly coded items in the memos such as everything noted
about class discussions, or all comments related to a specific CT component. Therefore, one
member of the team with extensive NVivo experience downloaded the memos as Word
documents for coding in NVivo. Structural coding was used to collate related information in
the memos including activity type, CT component, and level of independence associated with
the magnitude coding (see Figure 3).
Figure 3
Screenshot of Structural Coding in NVivo
After structural coding was completed, it was possible to run queries to look for patterns
across the data. For example, matrix coding queries were run in NVivo to examine the
intersection of activity types and CT components as shown in Figure 4. The heatmap function
helped us identify areas of prevalence across intersections of coded content. The underlying
memo text could be opened by clicking each cell in the matrix to review the coded text. The
combination of coding and queries was used to identify how CT was associated with different
types of activities. For example, we identified a strong pattern of heuristics, algorithms, and
logical thinking during programming activities. We found that students became progressively
more autonomous and exhibited CT on their own as the program progressed. In addition,
different activities promoted different aspects of CT. For example, when writing a computer
program for the robot we consistently observed conditional logic and algorithms. Detailed
results of the CT study will be reported in a separate manuscript since the focus of the present
paper is on the methodological and technical issues that occurred behind the scenes.
Figure 4
Screenshot of Heatmap from Matrix Query in NVivo
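For readers without NVivo, the cross-tabulation behind a matrix coding query like the one in Figure 4 can be approximated with a few lines of pandas. The coded rows below are invented for illustration; in practice, each row would correspond to a structurally coded memo excerpt.

```python
# A sketch of the activity-type x CT-component cross-tabulation behind a
# matrix coding query; rows are illustrative coded memo excerpts.
import pandas as pd

coded = pd.DataFrame({
    "activity":  ["programming", "programming", "discussion", "robot_testing"],
    "component": ["conditional_logic", "algorithms", "vocabulary", "heuristics"],
})

# Counts at each activity/component intersection; rendering this table as
# a heatmap highlights areas of prevalence across the coded data.
matrix = pd.crosstab(coded["activity"], coded["component"])
print(matrix)
```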
Discussion
In this paper, we shared our approach for qualitative analysis of video data recorded
during an after-school robotics program that was designed to promote CT. This research project
required us to overcome various challenges that stemmed from conducting video analysis
online, which was necessary due to a distributed research team, data security requirements, and
limited access to campus during the COVID-19 pandemic. The main contribution of this work
is to unpack how online analysis of video has been approached and to promote further
discussion of audiovisual analysis where there has been a noted lack of detailed documentation
about analytic and technical procedures (Silver & Patashnick, 2011; Smith et al., 2016). The
key takeaways from this paper are found in the disclosure of how we addressed methodological
or technological challenges that might be encountered when conducting studies involving
online analysis of video. A central methodological issue in any video study is identifying what
to look for in the video content and establishing a process to document and analyze what is
observed. In our study, this meant first turning to the literature to identify several elemental
components of CT. This gave us a starting point for what to look for in this contested domain
where definitions and research evidence are evolving. Then, we had to identify which tools
were available for this type of analysis. We considered the relative advantages of various
technologies, including cost, ease of use, compatibility with the computers used to do the
research (i.e., some of us were using Mac and others Windows operating systems), and
functionality of the software for conducting qualitative analysis of online video. We learned
that CAQDAS software is robust for desktop analysis but is limited for the type of online
qualitative video data analysis we needed to do (Evers, 2018). Therefore, we combined online
tools found in the collection of Google Drive applications with desktop CAQDAS to facilitate
the full range of analysis procedures. This process was consistent with recommendations from
the literature on selection and use of digital technologies for qualitative research (Davidson et
al., 2016; Davidson & di Gregorio, 2011; Silver & Bulloch, 2017).
Further work is needed to develop methodological strategies and identify how to select
or use technologies that support analysis of online video. The approach shared in this paper is
only one of many possible approaches for solving the challenge of collaborative analysis of
online video. There are other issues to examine further including those that Evers (2018) noted
with online (i.e., in the cloud) analysis of qualitative data. These include benefits such as
anywhere access or minimization of compatibility issues, and potential problems such as data
security and shared responsibility among researchers for safekeeping of data. In addition,
CAQDAS tools currently offer limited or no support for online analysis of videos (Silver &
Patashnick, 2011). More work is needed to develop new technologies or adapt analysis methods
to work within existing technological capabilities. Additional methodological papers with
examples detailing how researchers grapple with these challenges will promote development
of solutions for online qualitative video analysis.
References
An, S., & Lee, Y. (2014). Development of pre-service teacher education program for
computational thinking. In M. Searson & M. N. Ochoa (Eds.), Proceedings of Society
for Information Technology & Teacher Education International Conference 2014 (pp.
2055-2059). Association for the Advancement of Computing in Education (AACE).
https://www.learntechlib.org/p/131092
Bene, R. (2014). Opportunities and challenges of using video to examine high school students’
metacognition. The Qualitative Report, 19(35), 1-26. https://doi.org/10.46743/2160-3715/2014.1020
Berry, S. (2017). Building community in online doctoral classrooms: Instructor practices that
support community. Online Learning, 21(2), 1-22. https://doi.org/10.24059/olj.v21i2.875
Bowden, H. M. (2019). Problem-solving in collaborative game design practices: Epistemic
stance, affect, and engagement. Learning, Media and Technology, 44(2), 124-143.
https://doi.org/10.1080/17439884.2018.1563106
Bundy, A. (2007). Computational thinking is pervasive. Journal of Scientific and Practical
Computing, 1(2), 67-69. https://core.ac.uk/download/pdf/28961399.pdf
Clarke, D., & Chan, M. C. E. (2019). The use of video in classroom research: Window, lens or
mirror. In L. Xu, G. Aranda, W. Widjaja, & D. Clarke (Eds.), Video-based research in
education: Cross disciplinary perspectives (pp. 5-17). Routledge.
Cutumisu, M., Adams, C., & Lu, C. (2019). A scoping review of empirical research on recent
computational thinking assessments. Journal of Science Education and Technology,
28(6), 651-676. https://doi.org/10.1007/s10956-019-09799-3
Davidson, J., & di Gregorio, S. (2011). Qualitative research and technology: In the midst of a
revolution. In N. K. Denzin & Y. S. Lincoln (Eds.), The SAGE handbook of qualitative
research (4th ed., pp. 627-643). SAGE.
Davidson, J., Paulus, T., & Jackson, K. (2016). Speculating on the future of digital tools for
qualitative research. Qualitative Inquiry, 22(7), 606-610.
https://doi.org/10.1177/1077800415622505
Derry, S. J., Pea, R. D., Barron, B., Engle, R. A., Erickson, F., Goldman, R., Hall, R.,
Koschmann, T., Lemke, J. L., Sherin, M. G., & Sherin, B. L. (2010). Conducting video
research in the learning sciences: Guidance on selection, analysis, technology, and
ethics. Journal of the Learning Sciences, 19(1), 3-53.
https://doi.org/10.1080/10508400903452884
Erickson, F. (2011). Uses of video in social research: A brief history. International Journal of
Social Research Methodology, 14(3), 179-189.
https://doi.org/10.1080/13645579.2011.563615
Estrada, L. M., & Koolen, M. (2018). Audiovisual media annotation using qualitative data
analysis software: A comparative analysis. The Qualitative Report, 23(13), 40-60.
https://doi.org/10.46743/2160-3715/2018.3035
Evers, J. C. (2011). From the past into the future. How technological developments change our
ways of data collection, transcription and analysis. Forum Qualitative Sozialforschung,
12(1). https://doi.org/10.17169/fqs-12.1.1636
Evers, J. C. (2018). Current issues in qualitative data analysis software (QDAS): A user and
developer perspective. The Qualitative Report, 23(13), 61-73.
https://doi.org/10.46743/2160-3715/2018.3205
Fitzgerald, A., Hackling, M., & Dawson, V. (2013). Through the viewfinder: Reflecting on the
collection and analysis of classroom video data. International Journal of Qualitative
Methods, 12(1), 52-64. https://doi.org/10.1177/160940691301200127
Gray, L., Wong-Wylie, G., Rempel, G., & Cook, K. (2020). Expanding qualitative research
interviewing strategies: Zoom video communications. The Qualitative Report, 25(5),
1292-1301. https://doi.org/10.46743/2160-3715/2020.4212
Grover, S., & Pea, R. (2013). Computational thinking in K-12: A review of the state of the
field. Educational Researcher, 42(1), 38-43.
https://doi.org/10.3102/0013189X12463051
Hadad, R., Thomas, K., Kachovska, M., & Yin, Y. (2020). Practicing formative assessment for
computational thinking in making environments. Journal of Science Education and
Technology, 29(1), 162-173. https://doi.org/10.1007/s10956-019-09796-6
Heath, C., Hindmarsh, J., & Luff, P. (2010). Video in qualitative research. SAGE.
Ho, W. Y. J. (2019). ‘I knew that you were there, so I was talking to you’: The use of
screen-recording videos in online language learning research. Qualitative Research.
Advance online publication, 1-20. https://doi.org/10.1177/1468794119885044
Hsu, T.-C., Chang, S.-C., & Hung, Y.-T. (2018). How to learn and how to teach computational
thinking: Suggestions based on a review of the literature. Computers & Education, 126,
296-310. https://doi.org/10.1016/j.compedu.2018.07.004
International Society for Technology in Education (ISTE), & Computer Science Teachers
Association (CSTA). (2011). Operational definition of computational thinking for K-12
education. https://id.iste.org/docs/ct-documents/computational-thinking-operational-definition-flyer.pdf
Israel, M., Wherfel, Q. M., Shehab, S., Melvin, O., & Lash, T. (2017). Describing elementary
students’ interactions in K-5 puzzle-based computer science environments using the
Collaborative Computing Observation Instrument (C-COI). Association for
Computing Machinery. https://doi.org/10.1145/3105726.3106167
Jordan, M. E., & McDaniel, R. R. (2014). Managing uncertainty during collaborative problem
solving in elementary school teams: The role of peer influence in robotics engineering
activity. Journal of the Learning Sciences, 23(4), 490-536.
https://doi.org/10.1080/10508406.2014.896254
Knoblauch, H., & Tuma, R. (2020). Videography: An interpretive approach to video-recorded
micro-social interaction. In L. Pauwels & D. Mannay (Eds.), The SAGE handbook of
visual research methods (2nd ed., pp. 129-142). SAGE Publications.
Krist, C. (2020). Examining how classroom communities developed practice-based
epistemologies for science through analysis of longitudinal video data. Journal of
Educational Psychology, 112(3), 420-443. https://doi.org/10.1037/edu0000417
Lobe, B., Morgan, D., & Hoffman, K. A. (2020). Qualitative data collection in an era of social
distancing. International Journal of Qualitative Methods, 19, 1-8.
https://doi.org/10.1177/1609406920937875
Luff, P., & Heath, C. (2012). Some ‘technical challenges’ of video analysis: social actions,
objects, material realities and the problems of perspective. Qualitative Research, 12(3),
255-279. https://doi.org/10.1177/1468794112436655
Miles, M. B., Huberman, A. M., & Saldaña, J. (2020). Qualitative data analysis: A methods
sourcebook (4th ed.). SAGE.
QSR International. (2020). NVivo [Computer software].
https://www.qsrinternational.com/nvivo-qualitative-data-analysis-software/home
Saldaña, J. (2016). The coding manual for qualitative researchers (3rd ed.). SAGE.
Saltz, J., & Heckman, R. (2020). Using structured pair activities in a distributed online breakout
room. Online Learning, 24(1), 227-244. https://doi.org/10.24059/olj.v24i1.1632
Shute, V. J., Sun, C., & Asbell-Clarke, J. (2017). Demystifying computational thinking.
Educational Research Review, 22, 142-158.
https://doi.org/10.1016/j.edurev.2017.09.003
Silver, C., & Bulloch, S. L. (2017). CAQDAS at a crossroads: Affordances of technology in
an online environment. In N. G. Fielding, R. M. Lee, & G. Blank (Eds.), The SAGE
handbook of online research methods (2nd ed., pp. 470-485). SAGE.
Silver, C., & Lewins, A. (2014). Using software in qualitative research: A step-by-step guide
(2nd ed.). SAGE.
Silver, C., & Patashnick, J. (2011). Finding fidelity: Advancing audiovisual analysis using
software. Forum: Qualitative Social Research, 12(1), 1-22.
http://www.qualitative-research.net/index.php/fqs/article/view/1629
Silver, L. (2019, February 5). Smartphone ownership is growing rapidly around the world, but
not always equally. Pew Research Center.
https://www.pewresearch.org/global/2019/02/05/smartphone-ownership-is-growing-rapidly-around-the-world-but-not-always-equally/
Smith, S. K., Mountain, G. A., & Hawkins, R. J. (2016). A scoping review to identify the
techniques frequently used when analysing qualitative visual data. International
Journal of Social Research Methodology, 19(6), 693-715.
https://doi.org/10.1080/13645579.2015.1087141
Sullivan, F. R., Keith, K., & Wilson, N. C. (2016). Learning from the periphery in a
collaborative robotics workshop for girls. Universal Journal of Educational Research,
4(12), 2814-2825. https://doi.org/10.13189/ujer.2016.041215
Tang, X., Yin, Y., Lin, Q., Hadad, R., & Zhai, X. (2020). Assessing computational thinking: A
systematic review of empirical studies. Computers & Education, 148, 1-22.
https://doi.org/10.1016/j.compedu.2019.103798
Wing, J. M. (2006). Computational thinking. Communications of the ACM, 49(3), 33.
https://doi.org/10.1145/1118178.1118215
Wing, J. M. (2008). Computational thinking and thinking about computing. Philosophical
Transactions of the Royal Society A: Mathematical, Physical and Engineering
Sciences, 366(1881), 3717-3725. https://doi.org/10.1098/rsta.2008.0118
Yadav, A., Zhou, N., Mayfield, C., Hambrusch, S., & Korb, J. T. (2011). Introducing
computational thinking in education courses. In Proceedings of the 42nd ACM
technical symposium on Computer science education (SIGCSE '11). Association for
Computing Machinery. https://doi.org/10.1145/1953163.1953297
Yang, D., Baek, Y., Ching, Y., Swanson, S., Chittoori, B., & Wang, S. (2021). Infusing
computational thinking in an integrated STEM curriculum: User reactions and lessons
learned. European Journal of STEM Education, 6(1).
https://doi.org/10.20897/ejsteme/9560
Author Note
Chareen Snelson is an Associate Professor in the Department of Educational
Technology at Boise State University. Please direct correspondence to [email protected].
Dazhi Yang is a Professor in the Department of Educational Technology at Boise State
University. Please direct correspondence to [email protected].
Torrence Temple is a Doctoral Candidate in Educational Technology at Boise State
University. Please direct correspondence to [email protected].
Copyright 2021: Chareen Snelson, Dazhi Yang, Torrence Temple, and Nova
Southeastern University.
Article Citation
Snelson, C., Yang, D., & Temple, T. (2021). Addressing the challenges of online video analysis
in qualitative studies: A worked example from computational thinking research. The
Qualitative Report, 26(6), 1974-1988. https://doi.org/10.46743/2160-3715/2021.4734