I have a class A, which has two methods:

def app1():
    # ----some code-----
    app2()  # line 3

def app2():
    # ----some code-----
While writing a unit test for the above class, I am calling the app1() method, but I want to skip the call to app2() made from inside app1().

class TestController(unittest.TestCase):
    def setUp(self):
        self.app = app1()  # it fails here (at line 3) because there is some DB setup inside app2() which I want to skip
You are talking about mocking:
from unittest import TestCase
from unittest.mock import patch

from apps import app1


class App1Tests(TestCase):
    @patch('apps.app2')
    def test_app1(self, app2):
        app1()
        app2.assert_called_once_with()
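If app2 is really an instance method on class A (rather than the module-level function in apps that the patch target above assumes), the same idea works with patch.object. A minimal sketch, assuming A is importable from apps and app1 calls self.app2():

from unittest import TestCase
from unittest.mock import patch

from apps import A  # assumption: class A lives in the apps module


class ATests(TestCase):
    @patch.object(A, 'app2')      # replace A.app2 with a MagicMock for this test only
    def test_app1_skips_app2(self, mock_app2):
        a = A()
        a.app1()                  # the real app1 runs; the DB work inside app2 is skipped
        mock_app2.assert_called_once()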
I am trying to write unit tests for some of the tasks built with the Airflow TaskFlow API. I tried multiple approaches, for example creating a dagrun or running only the task function, but nothing is helping.
Here is a task where I download a file from S3; there is more going on in it, but I removed that for this example.
@task()
def updates_process(files):
    context = get_current_context()
    try:
        updates_file_path = utils.download_file_from_s3_bucket(files.get("updates_file"))
    except FileNotFoundError as e:
        log.error(e)
        return
    # Do something else
Now I was trying to write a test case where I can check this except clause. Following is one of the examples I started with:
class TestAccountLinkUpdatesProcess(TestCase):
    @mock.patch("dags.delta_load.updates.log")
    @mock.patch("dags.delta_load.updates.get_current_context")
    @mock.patch("dags.delta_load.updates.utils.download_file_from_s3_bucket")
    def test_file_not_found_error(self, download_file_from_s3_bucket, get_current_context, log):
        download_file_from_s3_bucket.side_effect = FileNotFoundError
        task = updates_process({"updates_file": "path/to/file.csv"})
        get_current_context.assert_called_once()
        log.error.assert_called_once()
I also tried creating a dagrun as shown in the example here in the docs and fetching the task from the dagrun, but that didn't help either.
I was struggling to do this myself, but I found that the decorated tasks have a .function attribute: https://github.dev/apache/airflow/blob/be7cb1e837b875f44fcf7903329755245dd02dc3/airflow/decorators/base.py#L522
You can then use .function to call the actual function. Using your example:
class TestAccountLinkUpdatesProcess(TestCase):
    @mock.patch("dags.delta_load.updates.log")
    @mock.patch("dags.delta_load.updates.get_current_context")
    @mock.patch("dags.delta_load.updates.utils.download_file_from_s3_bucket")
    def test_file_not_found_error(self, download_file_from_s3_bucket, get_current_context, log):
        download_file_from_s3_bucket.side_effect = FileNotFoundError
        task = dags.delta_load.updates.updates_process
        # Call the function for testing
        task.function({"updates_file": "path/to/file.csv"})
        get_current_context.assert_called_once()
        log.error.assert_called_once()
This prevents you from having to set up any of the DAG infrastructure and just runs the Python function as intended!
This is what I could figure out. Not sure if this is the right thing but it works.
class TestAccountLinkUpdatesProcess(TestCase):
    TASK_ID = "updates_process"

    @classmethod
    def setUpClass(cls) -> None:
        cls.dag = dag_delta_load()

    @mock.patch("dags.delta_load.updates.log")
    @mock.patch("dags.delta_load.updates.get_current_context")
    @mock.patch("dags.delta_load.updates.utils.download_file_from_s3_bucket")
    def test_file_not_found_error(self, download_file_from_s3_bucket, get_current_context, log):
        download_file_from_s3_bucket.side_effect = FileNotFoundError
        task = self.dag.get_task(task_id=self.TASK_ID)
        task.op_args = [{"updates_file": "file.csv"}]
        task.execute(context={})
        log.error.assert_called_once()
UPDATE: Based on the answer of @AetherUnbound I did some investigation and found that we can use task.__wrapped__() to call the actual Python function.
class TestAccountLinkUpdatesProcess(TestCase):
    @mock.patch("dags.delta_load.updates.log")
    @mock.patch("dags.delta_load.updates.get_current_context")
    @mock.patch("dags.delta_load.updates.utils.download_file_from_s3_bucket")
    def test_file_not_found_error(self, download_file_from_s3_bucket, get_current_context, log):
        download_file_from_s3_bucket.side_effect = FileNotFoundError
        updates_process.__wrapped__({"updates_file": "file.csv"})
        log.error.assert_called_once()
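For context, __wrapped__ is the conventional attribute that functools.wraps stores the undecorated callable under, which is why calling it runs the plain Python function. A generic illustration of that convention (not Airflow-specific):

import functools


def my_decorator(func):
    @functools.wraps(func)          # copies metadata and sets wrapper.__wrapped__ = func
    def wrapper(*args, **kwargs):
        return func(*args, **kwargs)
    return wrapper


@my_decorator
def greet(name):
    return f"hello {name}"


assert greet.__wrapped__("world") == "hello world"  # calls the undecorated function directly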
I have pipelines where the mechanics are always the same: a sequence of two tasks.
So I am trying to abstract their construction through an abstract parent class (using the TaskFlow API):
from abc import ABC, abstractmethod
from airflow.decorators import dag, task
from datetime import datetime


class AbstractDag(ABC):
    @abstractmethod
    def task_1(self):
        """task 1"""

    @abstractmethod
    def task_2(self, data):
        """task 2"""

    def dag_wrapper(self):
        @dag(schedule_interval=None, start_date=datetime(2022, 1, 1))
        def dag():
            @task(task_id='task_1')
            def task_1():
                return self.task_1()

            @task(task_id='task_2')
            def task_2(data):
                return self.task_2(data)

            task_2(task_1())

        return dag
But when I inherit from this class, I can't see my DAG in the interface:
class MyCustomDag(AbstractDag):
    def task_1(self):
        return 2

    @abstractmethod
    def task_2(self, data):
        print(data)


custom_dag = MyCustomDag()
dag_object = custom_dag.dag_wrapper()
Do you have any idea how to do this, or a better way to abstract it?
Thanks a lot!
Nicolas
I was able to get your example DAG to render in the UI with just a couple of small tweaks:
1. The MyCustomDag.task_2 method doesn't need to be decorated as an abstractmethod.
2. Using dag() as the name of the wrapped DAG-object function is problematic, since dag is also the name of the decorator.
3. In the AbstractDag.dag_wrapper method you do need to call the @dag-decorated function.
Here is the code I used:
from abc import ABC, abstractmethod
from airflow.decorators import dag, task
from datetime import datetime


class AbstractDag(ABC):
    @abstractmethod
    def task_1(self):
        """task 1"""

    @abstractmethod
    def task_2(self, data):
        """task 2"""

    def dag_wrapper(self):
        @dag(schedule_interval=None, start_date=datetime(2022, 1, 1))
        def _dag():
            @task(task_id='task_1')
            def task_1():
                return self.task_1()

            @task(task_id='task_2')
            def task_2(data):
                return self.task_2(data)

            task_2(task_1())

        return _dag()


class MyCustomDag(AbstractDag):
    def task_1(self):
        return 2

    def task_2(self, data):
        print(data)


custom_dag = MyCustomDag()
dag_object = custom_dag.dag_wrapper()
It's worth noting the following from the Airflow docs:
When searching for DAGs inside the DAG_FOLDER, Airflow only considers Python files that contain the strings airflow and dag (case-insensitively) as an optimization.
To consider all Python files instead, disable the DAG_DISCOVERY_SAFE_MODE configuration flag.
If you're inheriting from AbstractDag in a different file, make sure the strings airflow and dag appear somewhere in that file. You can simply add a comment containing those words.
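For example, a DAG file like the following (the import path is hypothetical) passes the safe-mode check purely because of the comment at the top:

# This file builds an airflow dag via MyCustomDag (keywords for DAG discovery safe mode).
from my_project.abstract_dag import AbstractDag  # hypothetical module path


class MyCustomDag(AbstractDag):
    def task_1(self):
        return 2

    def task_2(self, data):
        print(data)


dag_object = MyCustomDag().dag_wrapper()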
I have a test like:
import unittest
from unittest import mock

import somemodule
import somemodule2


class SomeTestCase(unittest.TestCase):
    def setUp(self):
        super().setUp()
        self.ft_mock = mock.MagicMock(spec=somemodule.SomeClass,
                                      name='mock_it')

    def tearDown(self) -> None:
        self.ft_mock.reset_mock(return_value=True, side_effect=True)

    @mock.patch('somemodule2.someFunk2',
                name='mock_2',
                spec=True,
                side_effect='some val')
    def testSomething(self, mock_rr):
        m = self.ft_mock
        m.addMemberSomething()
        self.assertEqual(len(m.addMember.call_args_list), 1)
        m.addSomethingElse.return_value = 'RETURN IT'
        m.addSomethingElse()
        m.addSomethingElse.assert_called_once()
        res = somemodule.FooClass.foo()
        mock_rr.assert_called()
Here somemodule.FooClass.foo() internally calls someFunk2 from somemodule2, which has been mocked as mock_rr.
While debugging the test it does call it (I can see a print from inside someFunk2), but when mock_rr.assert_called() runs, it throws:
AssertionError: Expected 'mock_2' to have been called.
I've tried several ways using patch and patch.object.
The issue was that somemodule also imported someFunk2 from somemodule2.
So the object being patched was somemodule2.someFunk2, while the object that needed to be patched was somemodule.someFunk2.
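In other words, patch the name in the namespace where it is looked up, not where it is defined. A minimal sketch under that assumption (i.e. somemodule does from somemodule2 import someFunk2):

import unittest
from unittest import mock

import somemodule  # somemodule.py does: from somemodule2 import someFunk2


class SomeTestCase(unittest.TestCase):
    # Patch the reference somemodule actually uses, not the definition in somemodule2.
    @mock.patch('somemodule.someFunk2', name='mock_2', spec=True)
    def testSomething(self, mock_rr):
        somemodule.FooClass.foo()   # foo() now resolves someFunk2 to the patched mock
        mock_rr.assert_called()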
I am working on a design pattern to turn my Python unittest suite into a POM (Page Object Model). So far I have written my page classes in the modules HomePageObject.py and FilterPageObject.py, my base class (for common stuff) TestBase in BaseTest.py, my test case modules TestCase1.py and TestCase2.py, and one runner module runner.py.
In the runner I am using loader.getTestCaseNames to get all the tests from the test case class of a module. In both test case modules the test class has the same name, 'Test', and the test method has the same name, 'testName'.
Since the names conflict when importing them in the runner, only one test gets executed. I want Python to scan all the modules I specify for tests and run them, even though the class names are the same.
I got to know that nose might be helpful here, but I'm not sure how I can implement it. Any advice?
BaseTest.py
from selenium import webdriver
from selenium.webdriver.support.wait import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver import ChromeOptions
import unittest


class TestBase(unittest.TestCase):
    driver = None

    def __init__(self, testName, browser):
        self.browser = browser
        super(TestBase, self).__init__(testName)

    def setUp(self):
        if self.browser == "firefox":
            TestBase.driver = webdriver.Firefox()
        elif self.browser == "chrome":
            options = ChromeOptions()
            options.add_argument("--start-maximized")
            TestBase.driver = webdriver.Chrome(chrome_options=options)
        self.url = "https://www.airbnb.co.in/"
        self.driver = TestBase.getdriver()
        TestBase.driver.implicitly_wait(10)

    def tearDown(self):
        self.driver.quit()

    @staticmethod
    def getdriver():
        return TestBase.driver

    @staticmethod
    def waitForElementVisibility(locator, expression, message):
        try:
            WebDriverWait(TestBase.driver, 20).\
                until(EC.presence_of_element_located((locator, expression)),
                      message)
            return True
        except:
            return False
TestCase1.py and TestCase2.py (same)
from airbnb.HomePageObject import HomePage
from airbnb.BaseTest import TestBase


class Test(TestBase):
    def __init__(self, testName, browser):
        super(Test, self).__init__(testName, browser)

    def testName(self):
        try:
            self.driver.get(self.url)
            h_page = HomePage()
            f_page = h_page.seachPlace("Sicily,Italy")
            f_page.selectExperience()
        finally:
            self.driver.quit()
runner.py
import unittest
from airbnb.TestCase1 import Test
from airbnb.TestCase2 import Test

loader = unittest.TestLoader()
test_names = loader.getTestCaseNames(Test)

suite = unittest.TestSuite()
for test in test_names:
    suite.addTest(Test(test, "chrome"))

runner = unittest.TextTestRunner()
result = runner.run(suite)
Also, even though that one test case passes, an error message comes up:
Ran 1 test in 9.734s
OK
Traceback (most recent call last):
File "F:\eclipse-jee-neon-3-win32\eclipse\plugins\org.python.pydev.core_6.3.3.201805051638\pysrc\runfiles.py", line 275, in <module>
main()
File "F:\eclipse-jee-neon-3-win32\eclipse\plugins\org.python.pydev.core_6.3.3.201805051638\pysrc\runfiles.py", line 97, in main
return pydev_runfiles.main(configuration) # Note: still doesn't return a proper value.
File "F:\eclipse-jee-neon-3-win32\eclipse\plugins\org.python.pydev.core_6.3.3.201805051638\pysrc\_pydev_runfiles\pydev_runfiles.py", line 874, in main
PydevTestRunner(configuration).run_tests()
File "F:\eclipse-jee-neon-3-win32\eclipse\plugins\org.python.pydev.core_6.3.3.201805051638\pysrc\_pydev_runfiles\pydev_runfiles.py", line 773, in run_tests
all_tests = self.find_tests_from_modules(file_and_modules_and_module_name)
File "F:\eclipse-jee-neon-3-win32\eclipse\plugins\org.python.pydev.core_6.3.3.201805051638\pysrc\_pydev_runfiles\pydev_runfiles.py", line 629, in find_tests_from_modules
suite = loader.loadTestsFromModule(m)
File "C:\Python27\lib\unittest\loader.py", line 65, in loadTestsFromModule
tests.append(self.loadTestsFromTestCase(obj))
File "C:\Python27\lib\unittest\loader.py", line 56, in loadTestsFromTestCase
loaded_suite = self.suiteClass(map(testCaseClass, testCaseNames))
TypeError: __init__() takes exactly 3 arguments (2 given)
I did this by searching for all the test class modules with a pattern, then using __import__(modulename) and calling each module's Test class with the desired parameters.
Here is my runner.py:
import unittest
import glob

loader = unittest.TestLoader()
suite = unittest.TestSuite()

test_file_strings = glob.glob('Test*.py')
module_strings = [s[0:len(s) - 3] for s in test_file_strings]

for module in module_strings:
    mod = __import__(module)
    test_names = loader.getTestCaseNames(mod.Test)
    for test in test_names:
        suite.addTest(mod.Test(test, "chrome"))

runner = unittest.TextTestRunner()
result = runner.run(suite)
This worked, but I am still looking for a more organized solution; one option is sketched after the output below.
(Not sure why the second run shows Ran 0 tests in 0.000s.)
Finding files... done.
Importing test modules ... ..done.
----------------------------------------------------------------------
Ran 2 tests in 37.491s
OK
----------------------------------------------------------------------
Ran 0 tests in 0.000s
OK
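One more organized option is to let unittest's discovery collect every Test*.py module. This is only a sketch, not the code above: it assumes TestBase and the Test classes drop the extra browser argument from __init__ and read the browser from a class attribute instead, so the default loader can instantiate them.

import unittest

from airbnb.BaseTest import TestBase

# Assumed class attribute consulted by setUp instead of an __init__ argument.
TestBase.browser = "chrome"

loader = unittest.TestLoader()
# discover() imports each matching module under its own name, so identical
# class names in TestCase1.py and TestCase2.py no longer collide.
suite = loader.discover(start_dir=".", pattern="Test*.py")

unittest.TextTestRunner(verbosity=2).run(suite)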
This is my test run; there are cases in the imported test classes:
from apa_login import Apa_login
from processmap import Show_processmap
import unittest, HTMLTestRunner, apa_login, processmap


def suite(self):
    suite = unittest.TestSuite()
    suite = unittest.TestLoader().loadTestsFromTestCase(Apa_login)
    suite = unittest.TestLoader().loadTestsFromTestCase(Show_processmap)
    return suite


if __name__ == '__main__':
    runner = unittest.TextTestRunner()
    runner.run(suite)
    print suite
Then it gives me this result:
$ python testrun.py
Ran 0 tests in 0.001s
OK
It should not be 0 tests, right?
You need to pass a TestSuite instance to runner.run() by invoking the suite() function; you're currently passing the function object itself.
runner.run(suite())  # pass the TestSuite by invoking suite()
Also note that you should remove the self argument from suite(), since that function is not part of a class.
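Putting it together, a corrected sketch of the runner (beyond the fix above, it also uses addTests instead of reassigning suite, so the Apa_login tests are not silently dropped):

import unittest

from apa_login import Apa_login
from processmap import Show_processmap


def suite():
    combined = unittest.TestSuite()
    loader = unittest.TestLoader()
    # addTests keeps both groups; reassigning the suite would discard the first one
    combined.addTests(loader.loadTestsFromTestCase(Apa_login))
    combined.addTests(loader.loadTestsFromTestCase(Show_processmap))
    return combined


if __name__ == '__main__':
    runner = unittest.TextTestRunner()
    runner.run(suite())   # invoke suite() so a TestSuite instance is passed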