v1.1.4 : Fixed a case in which func tests didn't detect errors

This commit is contained in:
Brendan LE GLAUNEC
2016-12-13 15:22:53 +01:00
parent 1e25be7ca5
commit 3510b98797
14 changed files with 102 additions and 95 deletions
+2 -5
View File
@@ -21,7 +21,6 @@ namespace cameradar {
// Uses the subnets specified in the conf file to launch nmap
bool
print::run() const {
bool first = true;
std::vector<stream_model> results = (*cache)->get_valid_streams();
std::ofstream file;
@@ -32,13 +31,11 @@ print::run() const {
}
file << "[\n";
unsigned int i = 0;
for (const auto& stream : results) {
file << deserialize(stream).toStyledString();
if (first) {
file << ",";
first = false;
}
if (++i < results.size()) file << ",";
LOG_INFO_("Generated JSON Result : " + deserialize(stream).toStyledString(), "print");
}
+2 -5
View File
@@ -4,17 +4,16 @@ MAINTAINER brendan.leglaunec@etixgroup.com
ENV LD_LIBRARY_PATH="/cameradar/libraries"
# install go
# Manually install go
RUN apt-get update && apt-get install -y make git wget curl
RUN wget https://storage.googleapis.com/golang/go1.6.linux-amd64.tar.gz
RUN tar -C /usr/local -xzf go1.6.linux-amd64.tar.gz
# set variable env
ENV GOPATH=/cameradartest/go
ENV PATH=$PATH:/go/bin
ENV PATH=$PATH:/usr/local/go/bin
ENV LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/usr/local/lib
# needed for cameradar
RUN apt-get update && apt-get install -y \
nmap \
libmysqlclient18 \
@@ -30,13 +29,11 @@ RUN apt-get install -y psmisc
ADD cameradar_*_Debug_Linux.tar.gz /
RUN mv cameradar_*_Debug_Linux cameradar
# create cameradaratest folder in go src path
RUN mkdir -p /cameradartest/go/src/cameradartest
COPY src/*.go /cameradartest/go/src/cameradartest/
COPY ./conf /conf
ADD ./docker/run_cameradartest.sh /run.sh
# get go deps
RUN go get github.com/go-sql-driver/mysql
RUN mkdir /thumbnails
-1
View File
@@ -14,7 +14,6 @@ RUN apt-get update && apt-get install -y \
libgstreamer-plugins-base1.0-dev \
libgstreamer-plugins-bad1.0-dev
ADD ./docker/screen.png /vlc/screen.png
COPY ./docker/run_ces.sh /start.sh
COPY ./camera_emulation_server /camera_emulation_server
Binary file not shown.

Before

Width:  |  Height:  |  Size: 558 KiB

-1
View File
@@ -21,7 +21,6 @@ import (
)
func (t *Tester) parseConfig() bool {
// Get config file path
confPath := "conf/cameratest.conf.json"
av := len(os.Args)
if av > 1 {
+6 -24
View File
@@ -22,40 +22,22 @@ import (
_ "github.com/go-sql-driver/mysql"
)
// MysqlDB contains the MySQL configuration
type MysqlDB struct {
Host string `json:"host"`
Port int `json:"port"`
User string `json:"user"`
Password string `json:"password"`
DbName string `json:"db_name"`
}
func (t *Tester) dropDB() bool {
dsn := t.DB.User + ":" + t.DB.Password + "@" + "tcp(" + t.DB.Host + ":" + strconv.Itoa(t.DB.Port) + ")/" + t.DB.DbName + "?charset=utf8"
dsn := t.ServiceConf.DbUser + ":" + t.ServiceConf.DbPassword + "@" + "tcp(" + t.ServiceConf.DbHost + ":" + strconv.Itoa(t.ServiceConf.DbPort) + ")/" + t.ServiceConf.DbName + "?charset=utf8"
db, err := sql.Open("mysql", dsn)
if err != nil {
fmt.Println(err)
}
defer db.Close()
q := "DROP DATABASE " + t.DB.DbName + ";"
q := "DROP DATABASE " + t.ServiceConf.DbName + ";"
_, err = db.Exec(q)
if err != nil {
fmt.Println(err)
}
fmt.Println("------ Dropped Cameradar Database -------")
return true
}
func (t *Tester) configureDatabase(DataBase *MysqlDB) bool {
var db MysqlDB
db.Host = t.Cameradar.DbHost
db.Port = t.Cameradar.DbPort
db.User = t.Cameradar.DbUser
db.Password = t.Cameradar.DbPassword
db.DbName = t.Cameradar.DbName
*DataBase = db
return true
}
+9 -5
View File
@@ -24,19 +24,23 @@ import (
// Start read log of service
func readLog(service *Service, reader io.ReadCloser) {
scanner := bufio.NewScanner(reader)
for scanner.Scan() {
str := scanner.Text()
if service.Console {
fmt.Printf("[%s] %s\n", service.Path, str)
if service.Config.Console {
fmt.Printf("[%s] %s\n", service.Config.Path, str)
}
fmt.Printf("%s\n", str)
service.Mutex.Lock()
service.Logs = append(service.Logs, str)
service.Mutex.Unlock()
}
if err := scanner.Err(); err != nil {
fmt.Printf("[%s] Service failed: %s\n", service.Path, err)
err := scanner.Err()
if err != nil {
fmt.Printf("[%s] Service failed: %s\n", service.Config.Path, err)
}
fmt.Printf("Logger of service: [%s] stopped\n", service.Path)
fmt.Printf("Logger of service: [%s] stopped\n", service.Config.Path)
service.Active = false
}
+1
View File
@@ -41,6 +41,7 @@ func main() {
fmt.Println("-> Write results FAILED")
os.Exit(1)
}
fmt.Println("--- Writing results done ---")
os.Exit(0)
}
+7 -1
View File
@@ -47,11 +47,13 @@ func getResult(test *[]Result, resultPath string) bool {
fmt.Printf("\nCan't open result file: %s\n", err)
return false
}
dec := json.NewDecoder(resultFile)
if err = dec.Decode(&test); err != nil {
fmt.Printf("\nUnable to deserialize result file: %s\n", err)
return false
}
return true
}
@@ -72,12 +74,14 @@ func isValid(e *Result, r Result) bool {
e.err = errors.New(e.Address + " had a different validity than expected")
return false
}
fmt.Println(e.Address + "seems valid.")
return true
}
// Extend needs refacto
// Extend takes a slice of Results and adds a new element to it
func Extend(slice []Result, element Result) []Result {
n := len(slice)
if n == cap(slice) {
// Slice is full; must grow.
// We double its size and add 1, so if the size is zero we still grow.
@@ -85,7 +89,9 @@ func Extend(slice []Result, element Result) []Result {
copy(newSlice, slice)
slice = newSlice
}
slice = slice[0 : n+1]
slice[n] = element
return slice
}
+20 -15
View File
@@ -21,8 +21,8 @@ import (
"sync"
)
// Service needs refacto
type Service struct {
// ServiceConfig contains the configuration variables for the service structure
type ServiceConfig struct {
Path string `json:"path"`
Args string `json:"args"`
Ports string `json:"ports"`
@@ -35,17 +35,22 @@ type Service struct {
DbPassword string `json:"db_password"`
DbName string `json:"db_name"`
Console bool `json:"console"`
Logs []string
Active bool // Based on io.ReadCloser status
Mutex sync.Mutex
cmd *exec.Cmd // Go handler of the service
}
func startService(service *Service) bool {
// Service allows to run a command and to access its logs asynchronously
type Service struct {
Config ServiceConfig // Configuration variables
Logs []string // Contains the executer's logs
Active bool // Based on io.ReadCloser status
Mutex sync.Mutex // Used to append to the logs safely
cmd *exec.Cmd // Pointer to the executer
}
func startService(service *Service, config ServiceConfig) bool {
// Launch service
args := strings.Fields(service.Args)
service.cmd = exec.Command(service.Path, args...)
service.Config = config
args := strings.Fields(service.Config.Args)
service.cmd = exec.Command(service.Config.Path, args...)
handler, err := service.cmd.StdoutPipe()
if err != nil {
@@ -64,7 +69,7 @@ func startService(service *Service) bool {
return false
}
fmt.Printf("Service: [%s] started\n", service.Path)
fmt.Printf("Service: [%s] started\n", service.Config.Path)
service.Active = true
// Read service logs and update service status
@@ -83,15 +88,15 @@ func stopService(service *Service) {
// Kill all instances of specified service
func killService(service *Service) {
// Sending SIGTERM
fmt.Printf("Executing: killall %s\n", service.Path)
cmd := exec.Command("killall", service.Path)
fmt.Printf("Executing: killall %s\n", service.Config.Path)
cmd := exec.Command("killall", service.Config.Path)
err := cmd.Run()
if err != nil {
fmt.Println(err)
}
sigAbort := []string{service.Path, "-s", "SIGABRT"}
fmt.Printf("Executing: killall %s -s SIGABRT\n", service.Path)
sigAbort := []string{service.Config.Path, "-s", "SIGABRT"}
fmt.Printf("Executing: killall %s -s SIGABRT\n", service.Config.Path)
cmd = exec.Command("killall", sigAbort...)
err = cmd.Run()
if err != nil {
+20 -17
View File
@@ -23,9 +23,9 @@ import (
// Test represents a test launched with Cameradar
type Test struct {
expected []Result
result []Result
time time.Duration
expected []Result // Contains the expected results
result []Result // Contains the results that have been validated
time time.Duration // Contains the runtime duration
}
func removeResult(expected []Result, index int) []Result {
@@ -52,43 +52,46 @@ func (t *Tester) invokeTestCase(testCase *Test, wg *sync.WaitGroup) {
// Then, if the result did not match the expected but it was supposed to fail
// Add it to the valid results and remove it from the expected slice
func (t *Tester) runTestCase(test *Test) {
startService(&t.Cameradar)
startService(&t.Cameradar, t.ServiceConf)
for t.Cameradar.Active {
time.Sleep(25 * time.Millisecond)
}
var validResults []Result
var invalidResults []Result
if getResult(&test.result, "/tmp/shared/result.json") {
for _, r := range test.result {
r.Valid = true
for index, e := range test.expected {
fmt.Println("Result : ", r)
fmt.Println("Expected test : ", e)
if e.Address == r.Address && isValid(&e, r) {
// _, err := os.Stat(r.Thumb)
// if err == nil) {
fmt.Println("The result of ", r.Address, " is valid and the thumbnails were generated by Cameradar.")
fmt.Println("The result of ", r.Address, " is valid.")
validResults = Extend(validResults, r)
test.expected = removeResult(test.expected, index)
break
// } else {
// e.err = error{"The result of " + e.Address + " seemed valid, but the thumbnails could not be generated by Cameradar : " + err.Error()}
// }
}
}
}
// This is in order to avoid checking the same values twice
copy := test.expected
for index, e := range copy {
for _, e := range test.expected {
if !e.Valid {
fmt.Println("The result of", e.Address, "successfully failed.")
validResults = Extend(validResults, e)
test.expected = removeResult(test.expected, index)
} else {
e.err = errors.New("The camera with the address " + e.Address + " was not found by cameradar")
test.expected = removeResult(test.expected, index)
test.expected = Extend(test.expected, e)
if e.err == nil {
e.err = errors.New("The camera with the address " + e.Address + " was not found by cameradar")
}
invalidResults = Extend(invalidResults, e)
fmt.Println("Should have been valid but was not found : ", e.Address)
}
}
test.result = validResults
test.expected = invalidResults
} else {
test.expected = nil
test.result = nil
}
}
+11 -11
View File
@@ -21,12 +21,12 @@ import (
// Tester is the structure that will manage the whole testing
type Tester struct {
Cameradar Service `json:"cameradar"`
Output string
ServiceConf ServiceConfig `json:"cameradar"`
Output string `json:"output"`
Tests []Result `json:"tests"`
Tests []Result
Result *Test
DB MysqlDB
Cameradar Service
Result *Test
}
// Init gets the testing configuration and makes sure that no other Cameradar service is running at the moment
@@ -49,12 +49,12 @@ func (t *Tester) Run() bool {
fmt.Println("\n- Launching all tests")
var newTest = new(Test)
newTest.expected = t.Tests
if t.configureDatabase(&t.DB) {
t.dropDB()
wg.Add(1)
go t.invokeTestCase(newTest, &wg)
t.Result = newTest
}
t.dropDB()
wg.Add(1)
go t.invokeTestCase(newTest, &wg)
t.Result = newTest
wg.Wait()
fmt.Println("All tests completed")
return true
+9 -5
View File
@@ -24,9 +24,6 @@ import (
"os"
)
////////////////////////////////////////////////
// Data declarations
// JUnitTestSuites is a collection of JUnit test suites.
type JUnitTestSuites struct {
XMLName xml.Name `xml:"testsuites"`
@@ -73,12 +70,17 @@ func (t *Tester) WriteResults(result Test, output string) bool {
fmt.Printf("The tests were unsuccessful: %s\n", err)
return false
}
fmt.Printf("-> JUnit XML report written: %s\n", output)
return true
}
// Write tests results under JUnit format on w
func (t *Tester) writeJUnitReportXML(result Test, rw io.ReadWriter, output string) error {
if result.expected == nil && result.result == nil {
return errors.New("Test results could not be deserialized.")
}
suites := JUnitTestSuites{}
buf, err := ioutil.ReadFile(output)
@@ -110,6 +112,7 @@ func (t *Tester) writeJUnitReportXML(result Test, rw io.ReadWriter, output strin
Time: fmt.Sprintf("%.6f", result.time.Seconds()),
Failure: nil,
}
if e.err != nil {
testCase.Failure = &JUnitFailure{
Message: e.err.Error(),
@@ -128,6 +131,7 @@ func (t *Tester) writeJUnitReportXML(result Test, rw io.ReadWriter, output strin
successCount++
}
}
fmt.Println("--- Test summary ---")
if successCount > 0 {
fmt.Printf("Results: %d/%d (%d%%)\n", successCount, successCount+failureCount, successCount*100/(successCount+failureCount))
@@ -137,19 +141,19 @@ func (t *Tester) writeJUnitReportXML(result Test, rw io.ReadWriter, output strin
}
suites.TestSuites = append(suites.TestSuites, ts)
// Fix indent
bytes, err := xml.MarshalIndent(suites, "", "\t")
if err != nil {
return err
}
// Write in param stream
w, err := os.OpenFile(output, os.O_WRONLY|os.O_TRUNC, 0644)
if err != nil {
return err
}
writer := io.Writer(w)
writer.Write(bytes)
if failureCount > 0 {
return errors.New("Some cameras were not successfully accessed.")
}
+15 -5
View File
@@ -18,29 +18,39 @@ function make_docker_command {
cmd="$cmd --link=\"$name\""
done
# add mysql libk
# add mysql link
cmd="$cmd --link=\"cameradar-database\""
# add cameradar srcs
# add cameradar sources
cmd="$cmd -v \"$(pwd)/src:/go/src/cameradartest\""
# add cmaeradar conf
# add cameradar testing volume
cmd="$cmd -v \"$(pwd)/:/tmp/tests\""
# add container name
# add cameradar shared volume
cmd="$cmd -v \"$(pwd)/:/tmp/shared\""
# add container name
cmd="$cmd cameradartest"
}
function start_test {
# Generate all cameras
./docker/gen_cameras.sh start $1 ./docker/cameratest.conf.tmpl.json
# Prepare docker command
make_docker_command $1
# Launch docker command
eval $cmd
# Get its return
ret=$?
# Stop all camera containers
./docker/gen_cameras.sh stop
return $ret
}
# build images
echo "building docker images"
# building fake-camera container
docker build --no-cache -f Dockerfile-camera -t fake-camera .