Merge pull request #162 from cracraft/jump_34group
Add test for 3 and 4 group integrations
cracraft authored Jul 29, 2021
2 parents 2c3b948 + 1be57a3 commit 1e97352
Showing 1 changed file with 122 additions and 28 deletions.
@@ -36,6 +36,8 @@
"\n",
"The data for this test was taken as part of ground testing. The file was originally named MIRM107-E-6021041029_1_493_SE_2016-01-21T04h22m18.fits and is an Imaging file from test IMG-RAD-17 with a point source centered at pixel (702,452) with a slope value of around 300 DN/s. It consists of 5 integrations of 20 frames each. The file was processed into a format compatible with the pipeline using a script called create_data and renamed to jw04192001001_01101_00001_MIRIMAGE_uncal.fits.\n",
"\n",
"There are also two simulated data files used in the testing. In Build 7.8, the jump step was extended so that data files with 3 and 4 groups per integration will be flagged for jumps rather than skipping the jump step. Two data files meeting these criteria were created for this test.\n",
"\n",
"\n",
"### Calibration WG Requested Algorithm: \n",
"\n",
@@ -82,7 +84,8 @@
"from tempfile import TemporaryDirectory\n",
"import os\n",
"data_dir = TemporaryDirectory()\n",
"os.chdir(data_dir.name)"
"os.chdir(data_dir.name)\n",
"print(data_dir)"
]
},
{
@@ -118,13 +121,16 @@
"source": [
"import numpy as np\n",
"import os\n",
"from jwst.datamodels import RampModel, SaturationModel, dqflags\n",
"\n",
"from jwst.datamodels import RampModel, ImageModel, dqflags\n",
"from jwst.pipeline import Detector1Pipeline\n",
"from jwst.jump import JumpStep\n",
"\n",
"from ci_watson.artifactory_helpers import get_bigdata\n",
"import inspect\n",
"from IPython.display import Markdown\n",
"import matplotlib.pyplot as plt"
"import matplotlib.pyplot as plt\n",
"import glob"
]
},
{
@@ -187,31 +193,24 @@
"source": [
"# read in Input files\n",
"\n",
"file = get_bigdata('jwst_validation_notebooks',\n",
"\n",
"input_files = ['jw04192001001_01101_00001_MIRIMAGE_uncal.fits', \n",
" 'det_image_seq1_MIRIMAGE_F1130Wexp1_3groups.fits',\n",
" 'det_image_seq1_MIRIMAGE_F1130Wexp1_4groups.fits']\n",
"\n",
"for file in input_files:\n",
" input_file = get_bigdata('jwst_validation_notebooks',\n",
" 'validation_data',\n",
" 'jump',\n",
" 'jump_miri_test', \n",
" 'jw04192001001_01101_00001_MIRIMAGE_uncal.fits')\n",
" 'jump_miri_test',\n",
" file)\n",
"\n",
"satfile = get_bigdata('jwst_validation_notebooks',\n",
"#This readnoise file is needed for use with simulated data which has higher readnoise than actual data.\n",
"readnoise = get_bigdata('jwst_validation_notebooks',\n",
" 'validation_data',\n",
" 'jump', \n",
" 'jump_miri_test', \n",
" 'miri_sat_55k.fits')\n",
"\n",
"filename = 'jw04192001001_01101_00001_MIRIMAGE_uncal.fits'\n",
"\n",
"# Temporary fix until the file can be loaded into artifactory\n",
"\n",
"#hduref = fits.open('jw04192001001_01101_00001_MIRIMAGE_uncal.fits') # read in each file\n",
"#hd = hduref[0].header\n",
"#hd['DATAMODL'] = 'RampModel'\n",
"#hduref.writeto('jw04192001001_01101_00001_MIRIMAGE_uncal.fits', overwrite=True)\n",
"\n",
"satfilename = 'miri_sat_55k.fits' \n",
" \n",
"im = RampModel('jw04192001001_01101_00001_MIRIMAGE_uncal.fits')\n",
"sat = SaturationModel('miri_sat_55k.fits')"
" 'jwst_mirisim_readnoise.fits')\n"
]
},
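{
"cell_type": "markdown",
"metadata": {},
"source": [
"As a quick sanity check, confirm the group counts of the downloaded files. This is a minimal sketch: the shape of a `RampModel` data array is (nints, ngroups, ny, nx), so the two simulated files should report 3 and 4 groups, respectively."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Verify the number of integrations and groups in each downloaded file\n",
"for file in input_files:\n",
"    with RampModel(file) as model:\n",
"        nints, ngroups, ny, nx = model.data.shape\n",
"        print(file, ':', nints, 'integrations of', ngroups, 'groups')"
]
},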
{
@@ -267,20 +266,21 @@
"metadata": {},
"outputs": [],
"source": [
"filename = 'jw04192001001_01101_00001_MIRIMAGE_uncal.fits'\n",
"im = RampModel('jw04192001001_01101_00001_MIRIMAGE_uncal.fits')\n",
"\n",
"imagefile = str(im.meta.filename)\n",
"print(imagefile)\n",
"\n",
"# loop through arrays of x, y and crmags to populate array with values\n",
"for x, y, crmag in zip(xpos, ypos, crmags):\n",
" # add cr to ramps from point of 'frame' in ramp\n",
" im.data[integration, frame:, y, x] = im.data[integration, frame:, y, x] + crmag\n",
"\n",
"\n",
" \n",
"# run cube with cr hits through jump\n",
"# set up pipeline parameters for input\n",
"pipe1 = Detector1Pipeline()\n",
"pipe1.jump.rejection_threshold = rej_thresh\n",
"pipe1.saturation.override_saturation = satfile\n",
" \n",
"# set up output file name\n",
"base, remainder = imagefile.split('_uncal')\n",
@@ -293,7 +293,7 @@
"pipe1.output_file = outname + '.fits'\n",
"\n",
"\n",
"# Run pipeline on each file\n",
"# Run pipeline on file\n",
"pipe1.run(im)\n",
"\n",
"print('Pipeline run finished')"
@@ -401,14 +401,108 @@
"plt.show()"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Test if three and four group data is being flagged appropriately\n",
"\n",
"Read in data files with three and four group data (simulated files), run through detector1 and check if the pixels are being flagged and that not too many pixels are being flagged.\n"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"smallgroup_files = ['det_image_seq1_MIRIMAGE_F1130Wexp1_3groups.fits',\n",
" 'det_image_seq1_MIRIMAGE_F1130Wexp1_4groups.fits']\n",
"\n",
"# Run the calwebb_detector1 pipeline\n",
"\n",
"# set up pipeline parameters \n",
"rej_thresh=10.0 # rejection threshold for jump step\n",
"\n",
"print('There are ', len(smallgroup_files), ' images.')\n",
" \n",
"# loop over list of files\n",
"for file in input_files:\n",
" \n",
" # set up pipeline parameters for input\n",
" pipe1 = Detector1Pipeline()\n",
" pipe1.jump.rejection_threshold = rej_thresh\n",
" pipe1.jump.override_readnoise = readnoise\n",
" pipe1.ramp_fit.override_readnoise = readnoise\n",
" \n",
" pipe1.refpix.skip = True # needs update to simulator for this to work properly with simulated data\n",
" \n",
" # set up output file name\n",
" base, remainder = file.split('.')\n",
" outname = base\n",
" \n",
" pipe1.output_file = outname+'.fits'\n",
"\n",
" # Run pipeline on each file\n",
" rampfile = pipe1.run(file)\n",
" \n",
"print('Detector 1 steps completed on all files.')\n"
]
},
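{
"cell_type": "markdown",
"metadata": {},
"source": [
"As an optional check before computing flagged fractions, list the rate files written by the pipeline runs. This is a minimal sketch assuming the default `_rate.fits` suffix used by `Detector1Pipeline`, as in the glob pattern below."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# List the rate files written by the Detector1 runs\n",
"ratefiles = sorted(glob.glob('*rate.fits'))\n",
"print('Rate files found:')\n",
"for rfile in ratefiles:\n",
"    print('  ', rfile)\n",
"assert len(ratefiles) > 0, 'No rate files found; check that the pipeline runs completed.'"
]
},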
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Test that the jump step isn't flagging too many pixels for the jump step in three and four group data. (Look at all files, but there is a known issue in Build 7.8 where MIRI data with 3 and 4 group integrations get all pixels flagged as jumps.) This test will show when the problem is fixed in the pipeline."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Read in each ramp file output from outlier_detection and see percentage of pixels flagged as jumps\n",
"ratefiles = glob.glob('*rate.fits')\n",
"\n",
"flag_thresh = 1.0 # Percentage above which user should be notified of high percentage of flagged pixels\n",
"\n",
"for slopefile in ratefiles: \n",
" file = ImageModel(slopefile)\n",
" nx = file.meta.subarray.xsize\n",
" ny = file.meta.subarray.ysize\n",
" filename = file.meta.filename\n",
" print(filename)\n",
"\n",
" numpix = nx * ny\n",
" \n",
" # Count number of pixels flagged as JUMP_DET\n",
" jumpcount = (file.dq & dqflags.pixel['JUMP_DET'] > 0).sum()\n",
" print('There are ', jumpcount, ' pixels flagged as jumps.')\n",
" \n",
" percentflagged = (jumpcount / numpix) * 100.\n",
"\n",
" print('The percentage of pixels flagged is ', percentflagged)\n",
" if percentflagged > flag_thresh:\n",
" print('This percentage is higher than it should be. Review data through jump step')\n",
" print('\\n') \n"
]
},
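{
"cell_type": "markdown",
"metadata": {},
"source": [
"To see where the flagged pixels fall on the detector, display the JUMP_DET mask for each rate file. This is a minimal sketch for visual inspection only: a healthy result shows scattered flagged pixels, whereas the Build 7.8 issue produces a fully flagged array."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Display the JUMP_DET mask for each rate file\n",
"for slopefile in ratefiles:\n",
"    with ImageModel(slopefile) as model:\n",
"        jumpmask = (model.dq & dqflags.pixel['JUMP_DET']) > 0\n",
"        plt.figure(figsize=(6, 6))\n",
"        plt.imshow(jumpmask, origin='lower', cmap='gray')\n",
"        plt.title('JUMP_DET flags: ' + model.meta.filename)\n",
"        plt.colorbar()\n",
"        plt.show()"
]
},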
{
"cell_type": "markdown",
"metadata": {},
"source": [
"<a id=\"about_ID\"></a>\n",
"## About this Notebook\n",
"**Author:** M. Cracraft, Senior Staff Scientist, INS/MIRI\n",
"<br>**Updated On:** 05/29/2020"
"<br>**Updated On:** 07/28/2021"
]
},
{
@@ -436,7 +530,7 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.8.6"
"version": "3.9.5"
}
},
"nbformat": 4,